diff --git a/.github/issue_template.md b/.github/issue_template.md index b396dbc73f..60318f794c 100644 --- a/.github/issue_template.md +++ b/.github/issue_template.md @@ -9,5 +9,5 @@ node -v npm -v # (or yarn -v) node -e "console.log(process.platform)" node -e "console.log(require('os').release())" -node -e "console.log(console.log(process.arch))" +node -e "console.log(process.arch)" ``` diff --git a/.github/workflows/build-openssl-packages.yml b/.github/workflows/build-openssl-packages.yml new file mode 100644 index 0000000000..7b7a5dea3e --- /dev/null +++ b/.github/workflows/build-openssl-packages.yml @@ -0,0 +1,48 @@ +name: Build and Publish OpenSSL Packages + +on: + workflow_dispatch: + +jobs: + build-openssl: + name: Build OpenSSL package for (${{ matrix.os }} ${{ matrix.arch }}) + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: windows-latest + arch: arm64 + - os: windows-latest + arch: x64 + - os: macos-15 + arch: arm64 + - os: macos-15-intel + arch: x64 + fail-fast: false + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Install dependencies + run: npm install + + - name: Build OpenSSL packages + env: + npm_config_arch: ${{ matrix.arch }} + NODEGIT_OPENSSL_BUILD_PACKAGE: 1 + OPENSSL_MACOS_DEPLOYMENT_TARGET: "11.0" + run: node utils/acquireOpenSSL.mjs + + - name: Push OpenSSL package to S3 + env: + npm_config_arch: ${{ matrix.arch }} + node_pre_gyp_bucket: ${{ secrets.node_pre_gyp_bucket }} + AWS_ACCESS_KEY_ID: ${{ secrets.node_pre_gyp_accessKeyId }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.node_pre_gyp_secretAccessKey }} + run: node utils/uploadOpenSSL.mjs diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000000..3cd6aae10c --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,196 @@ +on: + push: + branches: + - master + - backport/* + tags: + - v*.*.* + pull_request: + 
+name: Testing + +jobs: + linux-tests: + name: "Linux Tests" + strategy: + matrix: + node: [20, 22, 24] + fail-fast: false + runs-on: ubuntu-22.04 + steps: + - name: Install Dependencies for Ubuntu + run: sudo apt-get update && sudo apt-get install -y software-properties-common git build-essential clang libssl-dev libkrb5-dev libc++-dev wget zlib1g-dev + + - uses: actions/checkout@v4 + + - name: Setup Environment + run: | + set -e + mkdir ~/.ssh_tests + chmod 700 ~/.ssh_tests + printf "%b" "Host *\n\tStrictHostKeyChecking no\n" > ~/.ssh_tests/config + cat test/id_rsa.pub > ~/.ssh_tests/id_rsa.pub + cat test/id_rsa.enc | base64 -d > ~/.ssh_tests/id_rsa + chmod 600 ~/.ssh_tests/id_rsa* + git config --global user.name "John Doe" + git config --global user.email johndoe@example.com + + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node }} + check-latest: true + + - name: Install + run: npm install + + - name: Test + run: | + set -e + eval `ssh-agent -s` + ssh-add ~/.ssh_tests/id_rsa + node utils/retry npm test + + - name: Deploy + if: startsWith(github.ref, 'refs/tags/v') + env: + node_pre_gyp_bucket: ${{ secrets.node_pre_gyp_bucket }} + AWS_ACCESS_KEY_ID: ${{ secrets.node_pre_gyp_accessKeyId }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.node_pre_gyp_secretAccessKey }} + run: | + npm install -g @mapbox/node-pre-gyp aws-sdk + node lifecycleScripts/clean + node-pre-gyp package + node-pre-gyp publish + + macos-tests: + name: "macOS Tests" + strategy: + matrix: + node: [20, 22, 24] + arch: [x64, arm64] + fail-fast: false + runs-on: ${{ matrix.arch == 'x64' && 'macos-15-intel' || 'macos-15' }} + steps: + - uses: actions/checkout@v4 + + - name: Setup Environment + run: | + mkdir ~/.ssh_tests + chmod 700 ~/.ssh_tests + printf "%b" "Host *\n\tStrictHostKeyChecking no\n" > ~/.ssh_tests/config + cat test/id_rsa.pub > ~/.ssh_tests/id_rsa.pub + cat test/id_rsa.enc | base64 -d > 
~/.ssh_tests/id_rsa + chmod 600 ~/.ssh_tests/id_rsa* + git config --global user.name "John Doe" + git config --global user.email johndoe@example.com + + - name: Use Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node }} + check-latest: true + + - name: Install + run: npm install + + - name: Test + run: | + set -e + eval `ssh-agent -s` + ssh-add ~/.ssh_tests/id_rsa + node utils/retry npm test + + - name: Deploy + if: startsWith(github.ref, 'refs/tags/v') + env: + node_pre_gyp_bucket: ${{ secrets.node_pre_gyp_bucket }} + AWS_ACCESS_KEY_ID: ${{ secrets.node_pre_gyp_accessKeyId }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.node_pre_gyp_secretAccessKey }} + run: | + npm install -g @mapbox/node-pre-gyp aws-sdk + node lifecycleScripts/clean + node-pre-gyp package + node-pre-gyp publish + + windows-tests: + name: Windows Tests + strategy: + matrix: + node: [20, 22, 24] + arch: [x86, x64, arm64] + exclude: + - node: 24 + arch: x86 + fail-fast: false + runs-on: windows-2022 + steps: + - name: Setup Environment + run: | + git config --file C:\ProgramData\Git\config core.autocrlf input + git config --system core.autocrlf input + git config --global core.autocrlf input + git config --global user.name "John Doe" + git config --global user.email johndoe@example.com + + - uses: actions/checkout@v4 + + - name: Use Node.js + if: matrix.arch == 'x86' + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node }} + check-latest: true + architecture: x86 + + - name: Use Node.js + uses: actions/setup-node@v4 + if: matrix.arch != 'x86' + with: + node-version: ${{ matrix.node }} + check-latest: true + + - name: Install + env: + npm_config_arch: ${{ matrix.arch == 'x86' && 'ia32' || matrix.arch }} + run: npm install + + - name: Test + # need arm64 runners or an emulator to run tests + if: matrix.arch != 'arm64' + env: + GIT_SSH: ${{ github.workspace }}\vendor\plink.exe + run: | + $encodedKey = Get-Content -Path test\private.ppk.enc + $finalPath = Join-Path 
-Path $HOME -ChildPath .ssh_tests\private.ppk + mkdir ~\.ssh_tests + Set-Content -Value $([System.Convert]::FromBase64String($encodedKey)) -Path $finalPath -AsByteStream + powershell -command "Start-Process .\vendor\pageant\pageant_${{ matrix.arch }}.exe $finalPath" + node utils/retry npm test + + # You're probably wondering why this isn't a single `run: |` step, it certainly is for *nix, + # but it's not, because the CI runner for windows doesn't wait for each step as listed here + # and it treats each additional step past the first as an orphaned process. + - name: Deploy (Dependencies) + if: startsWith(github.ref, 'refs/tags/v') + run: npm install -g @mapbox/node-pre-gyp aws-sdk + + - name: Deploy (Clean) + if: startsWith(github.ref, 'refs/tags/v') + run: node lifecycleScripts\clean + + - name: Deploy (Package) + if: startsWith(github.ref, 'refs/tags/v') + run: node-pre-gyp package --target_arch=${{ matrix.arch }} + + - name: Deploy (Publish) + if: startsWith(github.ref, 'refs/tags/v') + env: + node_pre_gyp_bucket: ${{ secrets.node_pre_gyp_bucket }} + AWS_ACCESS_KEY_ID: ${{ secrets.node_pre_gyp_accessKeyId }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.node_pre_gyp_secretAccessKey }} + run: node-pre-gyp publish --target_arch=${{ matrix.arch }} diff --git a/.gitignore b/.gitignore index 3d571d1c78..7d1f15f049 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ /build/ /coverage/ -/dist/ /include/ /lib/enums.js /lib/nodegit.js @@ -35,13 +34,18 @@ /vendor/libssh2/src/stamp-h1 /vendor/libssh2/tests/.deps/ /vendor/libssh2/tests/Makefile +/vendor/libssh2/tests/ossfuzz/.deps +/vendor/libssh2/tests/ossfuzz/Makefile *.log .DS_STORE .idea +.clangd +.nyc_output/ .vscode jsconfig.json test/id_rsa test/nodegit-test-rsa +test/private.ppk diff --git a/.jshintrc b/.jshintrc index 0fd02f29b6..437878c8ce 100644 --- a/.jshintrc +++ b/.jshintrc @@ -1,8 +1,8 @@ { + "esversion": 9, "boss": true, "curly": true, "eqnull": true, - "esnext": true, "evil": true, "futurehostile": true, 
"globals": { @@ -16,7 +16,7 @@ "it": true }, "immed": false, - "maxlen": 80, + "maxlen": 120, "node": true, "predef": [ "Promise", diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 2b2add2f6d..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,124 +0,0 @@ -sudo: false -# update to Xenial in April 2019; Trusty will be EOL, Xenial new minimum supported OS version -dist: trusty - -branches: - only: - - master - - /^v\d+\.\d+\.\d+(-alpha\.\d+)?$/ - -compiler: clang -language: node_js - -# Stage order; the default stage is "test", which in our case is actually building and deploying -stages: - - "Extended testing" - - test - - "Deploy documentation" - -env: - - TARGET_ARCH="x64" - - TARGET_ARCH="ia32" - -node_js: - - "10" - - "8" - - "6" - -os: - - linux - - osx - -jobs: - exclude: - - os: osx - env: TARGET_ARCH="ia32" - include: - - stage: "Extended testing" - os: linux - dist: xenial - node_js: "8" - env: TARGET_ARCH="x64" EXTENDED_TESTING="false" SKIP_DEPLOY="true" - - stage: "Deploy documentation" - os: linux - dist: xenial - node_js: "8" - env: TARGET_ARCH="x64" DEPLOY_DOCUMENTATION="true" - - -git: - depth: 5 - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - build-essential - - libssl-dev - - gcc-4.9-multilib - - g++-4.9-multilib - - lcov - -before_install: - - export CC=clang - - export CXX=clang++ - - export npm_config_clang=1 - - export JOBS=4 - - - if [ -z "$TRAVIS_TAG" ] && [ "$EXTENDED_TESTING" == "true" ]; then - export GYP_DEFINES="coverage=1 use_obsolete_asm=true"; - export CC=/usr/bin/gcc-4.9; - export CXX=/usr/bin/g++-4.9; - export npm_config_clang=0; - wget http://downloads.sourceforge.net/ltp/lcov-1.10.tar.gz; - tar xvfz lcov-1.10.tar.gz; - else - export GYP_DEFINES="use_obsolete_asm=true"; - fi - -install: - - set -e; - - travis_retry npm install; - -# This is a random private key used purely for testing. 
-before_script: - - echo -e "Host *\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config - - echo -e "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBkHMoNRRkHYNE7EnQLdFxMgVcqGgNPYDhrWiLMlYuzpmEcUnhwW3zNaIa4J2JlGkRNgYZVia1Ic1V3koJPE3YO2+exAfJBIPeb6O1qDADc2hFFHzd28wmHKUkO61yzo2ZjDQfaEVtjN39Yiy19AbddN3bzNrgvuQT574fa6Rghl2RfecKYO77iHA1RGXIFc8heXVIUuUV/jHjb56WqoHH8vyt1DqUz89oyiHq8Cku0qzKN80COheZPseA1EvT0zlIgbXBxwijN4xRmvInK0fB5Kc9r3kddH2tT7V09bOFJsvGQaQmQ1WFTCqjpBFw1CHKcbfPLOxbLpVIR9gyx03R" > ~/.ssh/id_rsa.pub - - echo -e "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAwZBzKDUUZB2DROxJ0C3RcTIFXKhoDT2A4a1oizJWLs6ZhHFJ\n4cFt8zWiGuCdiZRpETYGGVYmtSHNVd5KCTxN2DtvnsQHyQSD3m+jtagwA3NoRRR8\n3dvMJhylJDutcs6NmYw0H2hFbYzd/WIstfQG3XTd28za4L7kE+e+H2ukYIZdkX3n\nCmDu+4hwNURlyBXPIXl1SFLlFf4x42+elqqBx/L8rdQ6lM/PaMoh6vApLtKsyjfN\nAjoXmT7HgNRL09M5SIG1wccIozeMUZryJytHweSnPa95HXR9rU+1dPWzhSbLxkGk\nJkNVhUwqo6QRcNQhynG3zyzsWy6VSEfYMsdN0QIDAQABAoIBABsZNPYBEFy/wPvq\nNJ8/et3lCdkh/oc0ABIYK9Wo82XUKKvhDF3drZ3p+UrX/VYgf+EX9hyf8gVTuSJ3\nX1gRqDhIgeTxPsHGrwt6B6pL5ITnKEbbimuo9Ni1E+2RqUO0ZSCE/1sSRv4CRaXO\nk8HZawif7ttxv4bNUrLys6xEbpvQlOMzgs4s/OBB/XMEqnFRGPJeeTy8bkOWyTwl\nLj06nq2brs4qK4eijI/MoGy1CD8JCpL4gG39GPTXd8GpudXmdelDn1E0t9nhL6Se\naOMaiPhy7kBJD4wZ//WZTSR1XyjNBH3DGkNZxPIWcX+wJFyNoLbSbVSda/7Dtvp3\nCPfiNhECgYEA/+3JswSzcVEANNF5OLZ76x+TODkZ9T6YF4SR8/uJjNViWgUpX7vw\nmyXF+2AwzNaotbBKmNG619BcUeMmQB76c+UiMLeJuJcT/Jj0xmEUopHonGqEIcvg\nHg6cafE1is7d+l669bfjitlx+3muF2CYnylSN1LWHxIITVUj3BmcWqUCgYEAwZ45\nWdaHfK7G6GjI7liDQT4ZlslA8dmLv2Jl2ExBBMoY3m3Sre428z2ZFa4O/nsBYP0a\nDxgYmX20fQGcbPugKdCYHc7HkKbMU1GwiVCGpDYZCm2gJKTvam3dYNaiAfq5DyhP\nzDCZNJ5rrSMprXsuRv2O4c5u8qtJ5ByaOJBjOr0CgYBMlkAxzkpUssS5CaaZDiLv\nLbfEr3HRLjYdc5KpzLBQ8NpJzhmfiIJsK1Wf8B0qb2J1XJg2Oy0KwFOgPbWIoryY\nSg19Pq98Cdn1UWCOrSabr8ZIaKe55WTgGcc8/O3k6BsNfaO9PJZfSssNUlCCtml1\n18u+uo9RJPhPDBd7Gj7r8QKBgFraxWy7t24xkZMDgK4fiM/3tQhFvhz/CY2wPbxG\n5Ae8UfkmLcOCUfTIReqfd9fAnsAFZNIKa5izHRu/wsh9NwYIJSlvm8PsEVtTrPRy\nfgvWet+i24/2eYZGsag8b19gaLCNKQzXDT1czYg8RNVsRSX427BoLzXeXNkW9uNu\nFbI
9AoGAV2kxcdcKS4BtNHKPeGgV87dM0DWhQaAtEXEIcQquFtba0lAXioGHg8U4\nzeiugl4Qzchwk5qd3wnZ4SOhx0s16/5gQDlnkbjFR6EREUnvLRwV92zBXUTOGIkh\nZ7Z4rcgUKlVAaHT3OHN/lTyqJG/ib+K4wZhbztl/ox+JUFsvD98=\n-----END RSA PRIVATE KEY-----" > ~/.ssh/id_rsa - - chmod 600 ~/.ssh/id_rsa* - - eval `ssh-agent -s` - - ssh-add ~/.ssh/id_rsa - - git config --global user.name "John Doe" - - git config --global user.email johndoe@example.com - -script: - if [ -z "$TRAVIS_TAG" ] && [ "$EXTENDED_TESTING" == "true" ]; then - travis_retry npm test && npm run cov && npm run coveralls; - else - travis_retry npm test; - fi - -after_success: - - if [ -n "$TRAVIS_TAG" ] && [ "$EXTENDED_TESTING" != "true" ] && [ "$DEPLOY_DOCUMENTATION" != "true" ] && [ "$SKIP_DEPLOY" != "true" ]; then - npm install -g node-pre-gyp; - npm install -g aws-sdk; - node lifecycleScripts/clean; - node-pre-gyp package --target_arch=$TARGET_ARCH; - node-pre-gyp publish --target_arch=$TARGET_ARCH; - fi - - - if [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ -n "$TRAVIS_TAG" ] && [ "$DEPLOY_DOCUMENTATION" == "true" ]; then - .travis/deploy-docs.sh; - fi - -notifications: - slack: - secure: KglNSqZiid9YudCwkPFDh+sZfW5BwFlM70y67E4peHwwlbbV1sSBPHcs74ZHP/lqgEZ4hMv4N2NI58oYFD5/1a+tKIQP1TkdIMuq4j2LXheuirA2HDcydOVrsC8kRx5XFGKdVRg/uyX2dlRHcOWFhxrS6yc6IxtxYWlRTD2SmEc= - - webhooks: - urls: - - https://webhooks.gitter.im/e/cbafdb27ad32ba746a73 - on_success: always # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: false # default: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 24a325fcec..d7c020aac7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,2041 @@ # Change Log +## v0.28.0-alpha.36 [(2025-11-21)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.36) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.35...v0.28.0-alpha.36) + +#### Summary of Changes + - Use openssl unconditionally for linux electron builds + - Fix 
cross-compiling libssh2 + - Fix Windows SSH keys, tests, documentation + - Add CI tests and Prebuilts for MacOS arm64 + - Bump tar-fs to fix security vulnerabilities + +#### Merged PRs into NodeGit +- [Bump tar-fs from 3.0.9 to 3.1.1](https://github.com/nodegit/nodegit/pull/2034) +- [Use custom electron for non-static builds on linux and fix cross-compilation](https://github.com/nodegit/nodegit/pull/2033) +- [add macos arm64 tests and prebuilts](https://github.com/nodegit/nodegit/pull/2030) + +## v0.28.0-alpha.35 [(2025-11-14)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.35) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.34...v0.28.0-alpha.35) + +#### Summary of Changes + - Bump libgit2 to 1.9.1 + - Bump OpenSSL to 3.0 + - Move OpenSSL Packaging to Github Actions + - Add arm64 build Support + +#### Merged PRs into NodeGit +- [Bump libgit2 to 1.9.1](https://github.com/nodegit/nodegit/pull/2025) +- [Bump OpenSSL to 3.0, Move OpenSSL package generation to Github Actions](https://github.com/nodegit/nodegit/pull/2026) +- [fix: correct macos arch labels](https://github.com/nodegit/nodegit/pull/2027) +- [Add Ability to compile for arm64](https://github.com/nodegit/nodegit/pull/2028) + +## v0.28.0-alpha.34 [(2025-07-23)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.34) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.33...v0.28.0-alpha.34) + +#### Summary of Changes + - Empty release to fix downstream issues + +#### Merged PRs into NodeGit +- None + +## v0.28.0-alpha.33 [(2025-06-03)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.33) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.32...v0.28.0-alpha.33) + +#### Summary of Changes + - fix non-standard import assertion + - update tar-fs + +#### Merged PRs into NodeGit +- [Fix Invalid Import Assertion, Bump tar-fs](https://github.com/nodegit/nodegit/pull/2022) + +## v0.28.0-alpha.32 
[(2025-05-28)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.32) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.31...v0.28.0-alpha.32) + +#### Summary of Changes + - fix windows build on electron + +#### Merged PRs into NodeGit +- [fix electron dependencies again](https://github.com/nodegit/nodegit/pull/2020) + +## v0.28.0-alpha.31 [(2025-05-27)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.31) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.30...v0.28.0-alpha.31) + +#### Summary of Changes +- bump libgit2 from v1.7.2 to v1.8.4 +- update several npm dependencies for deprecations and vulnerabilities + +#### Merged PRs into NodeGit +- [Dependency/Process Updates](https://github.com/nodegit/nodegit/pull/2019) +- [Bump libgit2 to 1.8.4, CI Updates](https://github.com/nodegit/nodegit/pull/2018) + +## v0.28.0-alpha.30 [(2025-02-13)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.30) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.29...v0.28.0-alpha.30) + +#### Summary of Changes +- Fix windows build + +#### Merged PRs into NodeGit +- [define NOMINMAX on windows](https://github.com/nodegit/nodegit/pull/2016) + +## v0.28.0-alpha.29 [(2025-02-11)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.29) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.28...v0.28.0-alpha.29) + +#### Summary of Changes +- Build on Electron 34+ +- fix use-after-free in Repository::statistics() + +#### Merged PRs into NodeGit +- [Bump @axosoft/nan and add ability to compile for c++20](https://github.com/nodegit/nodegit/pull/2012) +- [Fix Github Action workflow](https://github.com/nodegit/nodegit/pull/2014) + + +## v0.28.0-alpha.28 [(2024-07-01)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.28) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.27...v0.28.0-alpha.28) + +#### Summary 
of changes +- Build on Electron 31+ + +#### Merged PRs into NodeGit +- [Bump nan again for electron 31](https://github.com/nodegit/nodegit/pull/2000) + +## v0.28.0-alpha.27 [(2024-06-06)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.27) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.26...v0.28.0-alpha.27) + +#### Summary of changes +- Build on Electron 29+ + +#### Merged PRs into NodeGit +- [Fix build failure on electron 29+](https://github.com/nodegit/nodegit/pull/1998) + +## v0.28.0-alpha.26 [(2024-04-19)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.26) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.25...v0.28.0-alpha.26) + +#### Summary of changes +- Fix use-after-free in getRemotes + +#### Merged PRs into NodeGit +- [Fix double-free introduced trying to fix other double-free](https://github.com/nodegit/nodegit/pull/1996) + +## v0.28.0-alpha.25 [(2024-04-15)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.25) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.24...v0.28.0-alpha.25) + +#### Summary of changes +- Fix use-after-free in getReferences + +#### Merged PRs into NodeGit +- [Don't free the given repo on error in getReferences and getRemotes](https://github.com/nodegit/nodegit/pull/1995) + +## v0.28.0-alpha.24 [(2024-02-20)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.24) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.23...v0.28.0-alpha.24) + +#### Summary of changes +- Use Collision Detection SHA1 implementation +- Fix win32 Electron build due to incorrect OpenSSL include path + +#### Merged PRs into NodeGit +- [Use builtin SHA1 for libgit compilation](https://github.com/nodegit/nodegit/pull/1992) +- [Ensure OpenSSL root included in win32 Electron builds](https://github.com/nodegit/nodegit/pull/1991) + +## v0.28.0-alpha.23 
[(2024-02-14)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.23) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.22...v0.28.0-alpha.23) + +#### Summary of changes +- Bump libgit2 to 1.7.2 + +#### Merged PRs into NodeGit +- [Bump libgit2 to 1.7.2](https://github.com/nodegit/nodegit/pull/1990) + +## v0.28.0-alpha.22 [(2024-02-05)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.22) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.21...v0.28.0-alpha.22) + +#### Summary of changes +- Compatibility with Electron 28 +- NodeGit now requires Node 16+ + +#### Merged PRs into NodeGit +- [Fix electron 28 build failure](https://github.com/nodegit/nodegit/pull/1988) +- [Bump node-gyp to 10.0.1](https://github.com/nodegit/nodegit/pull/1989) + +## v0.28.0-alpha.21 [(2023-02-10)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.21) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.20...v0.28.0-alpha.21) + +#### Summary of changes +- Update OpenSSL to 1.1.1t +- Update got + other packages with security vulnerabilities +- Fix tag.createWithSignature function definition + +#### Merged PRs into NodeGit +- [Bump OpenSSL to 1.1.1t](https://github.com/nodegit/nodegit/pull/1971) +- [Update got + other locked package versions](https://github.com/nodegit/nodegit/pull/1969) +- [Fix tag createWithSignature function](https://github.com/nodegit/nodegit/pull/1945) + +## v0.28.0-alpha.20 [(2022-11-11)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.20) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.19...v0.28.0-alpha.20) + +#### Summary of changes +- Fix electron build issues + +#### Merged PRs into NodeGit +- [Fix electron build issues](https://github.com/nodegit/nodegit/pull/1955) + +## v0.28.0-alpha.19 [(2022-11-08)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.19) + +[Full 
Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.18...v0.28.0-alpha.19) + +#### Summary of changes +- OpenSSL bump +- OpenSSL binaries will be automatically downloaded when building for Electron on Windows and macOS +- Crash fix on Electron 18+ due to Nan bug +- Partial stash support + +#### Merged PRs into NodeGit +- [Allow overriding C++ standard](https://github.com/nodegit/nodegit/pull/1953) +- [Bump OpenSSL to 1.1.1s](https://github.com/nodegit/nodegit/pull/1952) +- [Fix intermittent crash on Electron 18+](https://github.com/nodegit/nodegit/pull/1951) +- [type is a call](https://github.com/nodegit/nodegit/pull/1942) +- [Fix leak in agent](https://github.com/nodegit/nodegit/pull/1947) +- [Default to using precompiled OpenSSL for Electron](https://github.com/nodegit/nodegit/pull/1949) +- [Partial stash support](https://github.com/nodegit/nodegit/pull/1948) +- [Switch CI to macOS-11](https://github.com/nodegit/nodegit/pull/1950) +- [Preemptively Patch OpenSSL 1.1.1q](https://github.com/nodegit/nodegit/pull/1928) +- [Add getAllFilepaths function in tree object](https://github.com/nodegit/nodegit/pull/1919) + +## v0.28.0-alpha.18 [(2022-05-27)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.18) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.17...v0.28.0-alpha.18) + +#### Summary of changes +- Allow fetching partial patches from diff +- Fix nanosecond comparison typo + +#### Merged PRs into NodeGit +- [Improve Diff.patches to allow an index array](https://github.com/nodegit/nodegit/pull/1916) +- [Bring in GIT_USE_NSEC fix](https://github.com/nodegit/nodegit/pull/1917) + +## v0.28.0-alpha.17 [(2022-05-24)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.17) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.16...v0.28.0-alpha.17) + +#### Summary of changes +- Enable nanosecond precision for file operations + +#### Merged PRs into NodeGit +- [Enable 
GIT_USE_NSEC](https://github.com/nodegit/nodegit/pull/1912) + +## v0.28.0-alpha.16 [(2022-05-09)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.16) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.15...v0.28.0-alpha.16) + +#### Summary of changes +- Allow disabling specific filters during checkout + +#### Merged PRs into NodeGit +- [Allow disabling specific filters during checkout](https://github.com/nodegit/nodegit/pull/1911) + +## v0.28.0-alpha.15 [(2022-05-05)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.15) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.14...v0.28.0-alpha.15) + +#### Summary of changes +- Expose `GIT_OPT_GET_OWNER_VALIDATION` and `GIT_OPT_SET_OWNER_VALIDATION` + +#### Merged PRs into NodeGit +- [Expose get/set owner validation opts](https://github.com/nodegit/nodegit/pull/1910) + +## v0.28.0-alpha.14 [(2022-05-02)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.14) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.13...v0.28.0-alpha.14) + +#### Summary of changes +- Allow statically linking OpenSSL on Linux +- Update libgit2 to 1.3.1 + +#### Merged PRs into NodeGit +- [Statically compile OpenSSL on linux for electron](https://github.com/nodegit/nodegit/pull/1905) +- [Upgrade libgit2 to 1.3.1](https://github.com/nodegit/nodegit/pull/1894) + +## v0.28.0-alpha.13 [(2022-03-22)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.13) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.12...v0.28.0-alpha.13) + +#### Summary of changes +- Partially fix issue with building for Electron + +#### Merged PRs into NodeGit +- [Fix electron build](https://github.com/nodegit/nodegit/pull/1901) + +## v0.28.0-alpha.12 [(2022-03-18)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.12) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.11...v0.28.0-alpha.12) 
+ +#### Summary of changes +- Updated CI because of GitHub deprecations +- Added workaround for LFS performance regression + +#### Merged PRs into NodeGit +- [Update windows 2016 CI to 2019](https://github.com/nodegit/nodegit/pull/1897) +- [Skip "can clone with git" test, unauthenticated git protocol is no longer supported in Github](https://github.com/nodegit/nodegit/pull/1899) +- [UNSAFE Temporary workaround for LFS checkout performance regression](https://github.com/nodegit/nodegit/pull/1883) +- [Update Github Actions for node 16](https://github.com/nodegit/nodegit/pull/1896) + +## v0.28.0-alpha.11 [(2022-02-08)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.11) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.10...v0.28.0-alpha.11) + +#### Summary of changes +- Updated libssh2 to add RSA SHA2 256/512 SSH key support + +#### Merged PRs into NodeGit +- [RSA SHA2 256/512 key upgrade support RFC 8332 #536 (#626)](https://github.com/nodegit/nodegit/pull/1888) +- [Fix typos in examples](https://github.com/nodegit/nodegit/pull/1884) +- [Don't build shared OpenSSL libs](https://github.com/nodegit/nodegit/pull/1877) + +## v0.28.0-alpha.10 [(2021-11-11)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.10) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.9...v0.28.0-alpha.10) + +#### Summary of changes +- Reworked CI due to GitHub dropping Ubuntu 16.04 support +- When building for Electron on Windows/macOS and prebuilts are unavailable: NodeGit will attempt to build OpenSSL locally by default. This is due to Conan changing their API/provided OpenSSL binaries. There are options for pointing to an installed OpenSSL location or URL for downloading prebuilt binaries, see [Building from source](http://www.nodegit.org/guides/install/from-source/). 
+- Updated OpenSSL to 1.1.1l +- Updated libssh2 to 1.10.0 +- Added `Repo.prototype.statistics` method for calculating repository statistics +- More progress towards becoming context-aware + +#### Merged PRs into NodeGit +- [Allow download of prebuilt OpenSSL](https://github.com/nodegit/nodegit/pull/1875) +- [Update libssh2 to 1.10.0](https://github.com/nodegit/nodegit/pull/1874) +- [Statistics with same output as "git-sizer -j"](https://github.com/nodegit/nodegit/pull/1846) +- [Fix memory leak on context shutdown](https://github.com/nodegit/nodegit/pull/1856) +- [Build OpenSSL locally for Electron](https://github.com/nodegit/nodegit/pull/1870) +- [Fix a reference error when compiling with VC2019](https://github.com/nodegit/nodegit/pull/1859) +- [Use containers for Linux CI](https://github.com/nodegit/nodegit/pull/1860) + + +## v0.28.0-alpha.9 [(2021-06-04)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.9) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.8...v0.28.0-alpha.9) + +#### Summary of changes +- Fixes an issue where rebase.init and rebase.open were ignoring callbacks in some situations + +#### Merged PRs into NodeGit +- [Shallow clone rebase options before modifying #1845](https://github.com/nodegit/nodegit/pull/1845) + + +## v0.28.0-alpha.8 [(2021-05-10)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.8) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.7...v0.28.0-alpha.8) + +#### Summary of changes +- Fixes another issue where Kerberos proxy authentication causes network failures + +#### Merged PRs into NodeGit +- [Bump libgit2 to include libgit2#5852 #1844](https://github.com/nodegit/nodegit/pull/1844) + +#### Merged PRs into Libgit2 +- [Fix issues with Proxy Authentication after httpclient refactor #5852](https://github.com/libgit2/libgit2/pull/5852) + + +## v0.28.0-alpha.7 [(2021-04-30)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.7) + +[Full 
Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.6...v0.28.0-alpha.7) + +#### Summary of changes +- Fixes issue with where proxy authentication fails on linux/osx with assertion error. + +#### Merged PRs into NodeGit +- [Bump Libgit2 to fix proxy auth on linux / osx #1841](https://github.com/nodegit/nodegit/pull/1841) + +#### Merged PRs into Libgit2 +- [https://github.com/libgit2/libgit2/pull/5852](https://github.com/libgit2/libgit2/pull/5852) + + +## v0.28.0-alpha.6 [(2021-04-23)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.6) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.5...v0.28.0-alpha.6) + +#### Summary of changes +- Rewrote options normalization such that it is done in C++. Provided deprecated classes for backwards compatibility. These will be removed in a later version of Nodegit. +- Deprecated construction of these objects, in the future, please pass plain objects with just the fields you wish to override to NodeGit, and the library will take care of the rest. 
+ - NodeGit.ApplyOptions + - NodeGit.BlameOptions + - NodeGit.BlobFilterOptions + - NodeGit.CheckoutOptions + - NodeGit.CherrypickOptions + - NodeGit.CloneOptions + - NodeGit.DescribeFormatOptions + - NodeGit.DiffFindOptions + - NodeGit.FetchOptions + - NodeGit.MergeFileInput + - NodeGit.MergeFileOptions + - NodeGit.MergeOptions + - NodeGit.ProxyOptions + - NodeGit.PushOptions + - NodeGit.RebaseOptions + - NodeGit.RemoteCreatOptions + - NodeGit.RepositoryInitOptions + - NodeGit.RevertOptions + - NodeGit.StashApplyOptions + - NodeGit.StatusOptions + - NodeGit.SubmoduleUpdateOptions +- Ensured the following functions have their optional arguments labeled/working as optional: + - NodeGit.Apply + - NodeGit.Checkout.index + - NodeGit.Cherrypick + - NodeGit.Cherrypick.commit + - NodeGit.Merge + - NodeGit.PatchBlobs + - NodeGit.Rebase.open + - NodeGit.Remote.prototype.connect + - NodeGit.Remote.prototype.download + - NodeGit.Remote.prototype.fetch + - NodeGit.Remote.prototype.prune + - NodeGit.Remote.prototype.push + - NodeGit.Remote.prototype.upload + - NodeGit.Stash.apply + - NodeGit.Stash.pop + - NodeGit.Worktree.isPrunable + - NodeGit.Worktree.prune +- Updated the following functions to be async: + - NodeGit.Apply + - NodeGit.Remote.prototype.prune + - NodeGit.Worktree.isPrunable + - NodeGit.Worktree.prune +- Addressed issue where GitWorktreePruneOptions and GitWorktreeAddOptions were impossible to instantiate, thus making working with worktrees possible now. 
+- Addressed issue where GitIndexTime was not configurable +- Addressed issue where the following functions did not return errors from libgit2: + - NodeGit.Merge.analysis + - NodeGit.Note.commitRemove + +#### Merged PRs into NodeGit +- [Eliminate need for normalize options #1837](https://github.com/nodegit/nodegit/pull/1837) +- [Define optional arguments for Patch.fromBlobs() #1835](https://github.com/nodegit/nodegit/pull/1835) + + +## v0.28.0-alpha.5 [(2021-04-09)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.5) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.4...v0.28.0-alpha.5) + +#### Summary of changes +- Fixes crash in multithreaded checkout in fork of libgit2 + +#### Merged PRs into NodeGit +- [Update multithreaded checkout in libgit2 #1834](https://github.com/nodegit/nodegit/pull/1834) + +#### Merged PRs into Libgit2 +- [Default to GIT_BRANCH_DEFAULT if init.defaultBranch is empty string](https://github.com/libgit2/libgit2/pull/5832) +- [Remove duplicate line, in example code](https://github.com/libgit2/libgit2/pull/5821) + + +## v0.28.0-alpha.4 [(2021-04-07)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.4) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.3...v0.28.0-alpha.4) + +#### Summary of changes +- Introduces harder safeguards on persistent references to prevent garbage collection during async work + +#### Merged PRs into NodeGit +- [Workaround: Prevent objectwrap from being cleaned up during async work #1833](https://github.com/nodegit/nodegit/pull/1833) + + +## v0.28.0-alpha.3 [(2021-04-02)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.3) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.2...v0.28.0-alpha.3) + +#### Summary of changes +- Addresses failure to open repo with empty default branch name + +#### Merged PRs into NodeGit +- [Bring in changes from libgit2 #5832 
#1832](https://github.com/nodegit/nodegit/pull/1832) + +#### Cherrypicked PRs into Libgit2 +- [Default to GIT_BRANCH_DEFAULT if init.defaultBranch is empty string #5832](https://github.com/libgit2/libgit2/pull/5832) + + +## v0.28.0-alpha.2 [(2021-03-31)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.2) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.28.0-alpha.1...v0.28.0-alpha.2) + +#### Summary of changes +- Addresses crash in mwindow from libgit2 +- Bumps libgit2 to bring in bug fixes + +#### Merged PRs into NodeGit +- [Bump Libgit2 to 1.1.0 (on current head of libgit2) #1831](https://github.com/nodegit/nodegit/pull/1831) + +#### Merged PRs into Libgit2 +- [tree: deprecate `git_treebuilder_write_with_buffer`](https://github.com/libgit2/libgit2/pull/5815) +- [winhttp: skip certificate check if unable to send request](https://github.com/libgit2/libgit2/pull/5814) +- [commit-graph: Introduce `git_commit_graph_needs_refresh()`](https://github.com/libgit2/libgit2/pull/5764) +- [commit-graph: Support lookups of entries in a commit-graph](https://github.com/libgit2/libgit2/pull/5763) +- [merge: Check insert_head_ids error in create_virtual_base](https://github.com/libgit2/libgit2/pull/5818) +- [Check git_signature_dup failure](https://github.com/libgit2/libgit2/pull/5817) +- [Fix some typos](https://github.com/libgit2/libgit2/pull/5797) +- [include: fix typos in comments](https://github.com/libgit2/libgit2/pull/5805) +- [Fix documentation formating on repository.h](https://github.com/libgit2/libgit2/pull/5806) +- [index: Check git_vector_dup error in write_entries](https://github.com/libgit2/libgit2/pull/5801) +- [refdb_fs: Check git_sortedcache wlock/rlock errors](https://github.com/libgit2/libgit2/pull/5800) +- [Add new bindings for the R language](https://github.com/libgit2/libgit2/pull/5795) +- [Update .gitignore](https://github.com/libgit2/libgit2/pull/5787) +- [patch: add owner 
accessor](https://github.com/libgit2/libgit2/pull/5731) +- [commit-graph: Introduce a parser for commit-graph files](https://github.com/libgit2/libgit2/pull/5762) +- [revspec: rename git_revparse_mode_t to git_revspec_t](https://github.com/libgit2/libgit2/pull/5786) +- [mwindow: Fix a bug in the LRU window finding code](https://github.com/libgit2/libgit2/pull/5783) +- [ci: don't use ninja on macOS](https://github.com/libgit2/libgit2/pull/5780) +- [midx: Fix a bug in `git_midx_needs_refresh()`](https://github.com/libgit2/libgit2/pull/5768) +- [clone: set refs/remotes/origin/HEAD when branch is specified](https://github.com/libgit2/libgit2/pull/5775) +- [Use `p_pwrite`/`p_pread` consistently throughout the codebase](https://github.com/libgit2/libgit2/pull/5769) +- [README: instructions for using libgit2 without compiling](https://github.com/libgit2/libgit2/pull/5772) +- [Cope with empty default branch](https://github.com/libgit2/libgit2/pull/5770) +- [github-actions: Also rename the main branch here](https://github.com/libgit2/libgit2/pull/5771) +- [blob: fix name of `GIT_BLOB_FILTER_ATTRIBUTES_FROM_HEAD`](https://github.com/libgit2/libgit2/pull/5760) +- [Add documentation for git_blob_filter_options.version](https://github.com/libgit2/libgit2/pull/5759) +- [Build with NO_MMAP](https://github.com/libgit2/libgit2/pull/5583) +- [zlib: Add support for building with Chromium's zlib implementation](https://github.com/libgit2/libgit2/pull/5748) +- [Handle ipv6 addresses](https://github.com/libgit2/libgit2/pull/5741) +- [Add support for additional SSH hostkey types.](https://github.com/libgit2/libgit2/pull/5750) +- [Fix the `-DENABLE_WERROR=ON` build for gcc 10.2](https://github.com/libgit2/libgit2/pull/5749) +- [repository: use intptr_t's in the config map cache](https://github.com/libgit2/libgit2/pull/5746) +- [Add tests for `git__multiply_int64_overflow`](https://github.com/libgit2/libgit2/pull/5744) +- [Third attempt to fix the 32-bit version of 
`git__multiply_int64_overf…](https://github.com/libgit2/libgit2/pull/5743) +- [Avoid using `__builtin_mul_overflow` with the clang+32-bit combo](https://github.com/libgit2/libgit2/pull/5742) +- [ci: run codeql](https://github.com/libgit2/libgit2/pull/5709) +- [pack: continue zlib while we can make progress](https://github.com/libgit2/libgit2/pull/5740) +- [Re-enable the RC4 test](https://github.com/libgit2/libgit2/pull/4418) +- [Cache the parsed submodule config when diffing](https://github.com/libgit2/libgit2/pull/5727) +- [Make git__strntol64() ~70%* faster](https://github.com/libgit2/libgit2/pull/5735) +- [winhttp: support optional client cert](https://github.com/libgit2/libgit2/pull/5384) +- [git.git-authors: Replacing his/her with their](https://github.com/libgit2/libgit2/pull/5724) +- [Friendlier getting started in the lack of git_libgit2_init](https://github.com/libgit2/libgit2/pull/5578) +- [Thread-local storage: a generic internal library (with no allocations)](https://github.com/libgit2/libgit2/pull/5720) +- [Thread-free implementation](https://github.com/libgit2/libgit2/pull/5719) +- [Make the pack and mwindow implementations data-race-free](https://github.com/libgit2/libgit2/pull/5593) +- [Make the odb race-free](https://github.com/libgit2/libgit2/pull/5595) +- [Also add the raw hostkey to `git_cert_hostkey`](https://github.com/libgit2/libgit2/pull/5704) +- [Fix the `ENABLE_WERROR=ON` build in Groovy Gorilla (gcc 10.2)](https://github.com/libgit2/libgit2/pull/5715) +- [odb: Add git_odb_options](https://github.com/libgit2/libgit2/pull/5447) +- [Introduce GIT_ASSERT macros](https://github.com/libgit2/libgit2/pull/5327) +- [ci: only report main branch in README status](https://github.com/libgit2/libgit2/pull/5708) +- [ci: run coverity in the nightly builds](https://github.com/libgit2/libgit2/pull/5707) +- [ci: more GitHub Actions](https://github.com/libgit2/libgit2/pull/5706) +- [Add a ThreadSanitizer build](https://github.com/libgit2/libgit2/pull/5597) +- 
[msvc crtdbg -> win32 leakcheck](https://github.com/libgit2/libgit2/pull/5580) +- [Add missing worktree_dir check and test case](https://github.com/libgit2/libgit2/pull/5692) +- [Fix the `-DTHREADSAFE=OFF` build](https://github.com/libgit2/libgit2/pull/5690) +- [ci: propagate environment variables](https://github.com/libgit2/libgit2/pull/5703) +- [ci: supply a token for self-hosted runners](https://github.com/libgit2/libgit2/pull/5702) +- [ci: supply a token for self-hosted runners](https://github.com/libgit2/libgit2/pull/5701) +- [ci: GitHub Actions for arm64](https://github.com/libgit2/libgit2/pull/5700) +- [ci: stop using deprecated set-env in GitHub Actions](https://github.com/libgit2/libgit2/pull/5697) +- [Deprecate `is_valid_name` functions; replace with `name_is_valid` functions](https://github.com/libgit2/libgit2/pull/5659) +- [Include `${MBEDTLS_INCLUDE_DIR}` when compiling `crypt_mbedtls.c`](https://github.com/libgit2/libgit2/pull/5685) +- [threadstate: rename tlsdata when building w/o threads](https://github.com/libgit2/libgit2/pull/5668) +- [Refactor "global" state](https://github.com/libgit2/libgit2/pull/5546) +- [Make the Windows leak detection more robust](https://github.com/libgit2/libgit2/pull/5661) +- [Define `git___load` when building with `-DTHREADSAFE=OFF`](https://github.com/libgit2/libgit2/pull/5664) +- [ntlm: update ntlm dependency for htonll](https://github.com/libgit2/libgit2/pull/5658) +- [libgit2 v1.1.0](https://github.com/libgit2/libgit2/pull/5660) +- [Update PCRE to 8.44](https://github.com/libgit2/libgit2/pull/5649) +- [clone: update origin's HEAD](https://github.com/libgit2/libgit2/pull/5651) +- [Improve the support of atomics](https://github.com/libgit2/libgit2/pull/5594) +- [Fix error return for invalid extensions.](https://github.com/libgit2/libgit2/pull/5656) +- [Change bare free to allocator free (fixes #5653)](https://github.com/libgit2/libgit2/pull/5654) +- [midx: Introduce a parser for multi-pack-index 
files](https://github.com/libgit2/libgit2/pull/5401) +- [Fixed typo in comment](https://github.com/libgit2/libgit2/pull/5648) +- [Fix binary diff showing /dev/null](https://github.com/libgit2/libgit2/pull/5494) +- [httpclient: only free challenges for current_server type](https://github.com/libgit2/libgit2/pull/5576) +- [Respect `init.defaultBranch` setting](https://github.com/libgit2/libgit2/pull/5581) +- [patch_parse: handle absence of "index" header for new/deleted cases](https://github.com/libgit2/libgit2/pull/5620) +- [boolean config parsing fails in some cases with mapped values](https://github.com/libgit2/libgit2/pull/5626) +- [Fix config file parsing with multi line values containing quoted parts](https://github.com/libgit2/libgit2/pull/5629) +- [Fix release build warnings](https://github.com/libgit2/libgit2/pull/5636) +- [Fix deprecation links inside of documentation not working](https://github.com/libgit2/libgit2/pull/5631) +- [Fix typo: Make ifndef macroname the same as the define name](https://github.com/libgit2/libgit2/pull/5632) +- [diff stats: fix segfaults with new files](https://github.com/libgit2/libgit2/pull/5619) +- [WinHTTP: Try to use TLS1.3](https://github.com/libgit2/libgit2/pull/5633) +- [Fixed includes for FreeBSD](https://github.com/libgit2/libgit2/pull/5628) +- [Don't fail if a HTTP server announces he supports a protocol upgrade](https://github.com/libgit2/libgit2/pull/5624) +- [Return false instead of segfaulting when checking for default port](https://github.com/libgit2/libgit2/pull/5621) +- [deps: ntlmclient: fix htonll for Haiku](https://github.com/libgit2/libgit2/pull/5612) +- [azure: Remove job generating documentation](https://github.com/libgit2/libgit2/pull/5615) +- [Fix crash in git_describe_commit when opts are NULL.](https://github.com/libgit2/libgit2/pull/5617) +- [Fix `git_mwindow_scan_recently_used` spuriously returning true](https://github.com/libgit2/libgit2/pull/5600) +- [zstream: handle Z_BUF_ERROR appropriately in 
get_output_chunk](https://github.com/libgit2/libgit2/pull/5599) +- [docs: fix typo](https://github.com/libgit2/libgit2/pull/5610) + + +## v0.28.0-alpha.1 [(2021-03-12)](https://github.com/nodegit/nodegit/releases/tag/v0.28.0-alpha.1) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.27.0...v0.28.0-alpha.1) + +#### Summary of changes +- *Notice* We planned to fix / address Electron 11 compatibility, but ran into some roadblocks. Fix is coming soon, follow [#1774](https://github.com/nodegit/nodegit/issues/1774) for details +- Drops support for Node 10.x.y, < 12.19.x, < 14.10.0 +- Brings in LibGit2 1.0.0 +- NodeGit.Config.prototype.setBool handles truthiness, and NodeGit.Config.prototype.getBool returns true or false +- Fix GC ownership memory issue +- Exposes sidebandProgress callback in GitRemoteCallbacks +- Fixes issue with winhttp and optional client certificates +- Addresses proxy issue with certificate validation in Windows +- Fix crash in NodeGit.Repository.prototype.refreshReferences +- Deprecations + - NodeGit.Cred is deprecated in favor of NodeGit.Credential + +#### Merged PRs into NodeGit +- [Include libgit2 winhttp proxy fix #1824](https://github.com/nodegit/nodegit/pull/1824) +- [Return/accept boolean for Config#get/setBool #1827](https://github.com/nodegit/nodegit/pull/1827) +- [First stab at #1800 (async/await in examples) #1802](https://github.com/nodegit/nodegit/pull/1802) +- [returns_info: fix ownedByIndices #1823](https://github.com/nodegit/nodegit/pull/1823) +- [Remove block for sideband_progress in remote_callbacks #1801](https://github.com/nodegit/nodegit/pull/1801) +- [Use key to grab credential type #1828](https://github.com/nodegit/nodegit/pull/1828) +- [Don't strdup nullptr from git_tag_message #1822](https://github.com/nodegit/nodegit/pull/1822) +- [Refactor for context-awareness #1795](https://github.com/nodegit/nodegit/pull/1795) +- [Update longpath enums to match libgit2 
#1797](https://github.com/nodegit/nodegit/pull/1797) +- [Bump libgit2 to fork of v1.0.0 #1788](https://github.com/nodegit/nodegit/pull/1788) + +#### Merged PRs into Libgit2 +- [winhttp: skip certificate check if unable to send request #5814](https://github.com/libgit2/libgit2/pull/5814) +- [sanitizer ci: skip negotiate tests](https://github.com/libgit2/libgit2/pull/5596) +- [Add CI support for Memory and UndefinedBehavior Sanitizers](https://github.com/libgit2/libgit2/pull/5569) +- [Access HEAD via the refdb backends](https://github.com/libgit2/libgit2/pull/5563) +- [config_entries: Avoid excessive map operations](https://github.com/libgit2/libgit2/pull/5582) +- [mwindow: set limit on number of open files](https://github.com/libgit2/libgit2/pull/5396) +- [refdb: a set of preliminary refactorings for the reftable backend](https://github.com/libgit2/libgit2/pull/5570) +- [CMake modernization pt2](https://github.com/libgit2/libgit2/pull/5547) +- [Make the tests run cleanly under UndefinedBehaviorSanitizer](https://github.com/libgit2/libgit2/pull/5568) +- [Make the tests pass cleanly with MemorySanitizer](https://github.com/libgit2/libgit2/pull/5567) +- [Enable building git2.rc resource script with GCC](https://github.com/libgit2/libgit2/pull/5561) +- [Make NTLMClient Memory and UndefinedBehavior Sanitizer-clean](https://github.com/libgit2/libgit2/pull/5571) +- [Random fixes for diff-printing](https://github.com/libgit2/libgit2/pull/5559) +- [index: Update the documentation for git_index_add_from_buffer()](https://github.com/libgit2/libgit2/pull/5419) +- [Introduce CI with GitHub Actions](https://github.com/libgit2/libgit2/pull/5550) +- [Random code cleanups and fixes](https://github.com/libgit2/libgit2/pull/5552) +- [examples: log: fix documentation generation](https://github.com/libgit2/libgit2/pull/5553) +- [Missing declarations](https://github.com/libgit2/libgit2/pull/5551) +- [clar: add tap output option](https://github.com/libgit2/libgit2/pull/5541) +- 
[diff::parse: don't include `diff.h`](https://github.com/libgit2/libgit2/pull/5545) +- [release script: fix typo](https://github.com/libgit2/libgit2/pull/5543) +- [tests: offer exact name matching with a `$` suffix](https://github.com/libgit2/libgit2/pull/5537) +- [httpclient: support googlesource](https://github.com/libgit2/libgit2/pull/5536) +- [git_packbuilder_write: Allow setting path to NULL to use the default path](https://github.com/libgit2/libgit2/pull/5532) +- [mempack: Use threads when building the pack](https://github.com/libgit2/libgit2/pull/5531) +- [clar: use internal functions instead of /bin/cp and /bin/rm](https://github.com/libgit2/libgit2/pull/5528) +- [strarray refactoring](https://github.com/libgit2/libgit2/pull/5535) +- [CMake cleanups](https://github.com/libgit2/libgit2/pull/5481) +- [git_pool_init: allow the function to fail](https://github.com/libgit2/libgit2/pull/5526) +- [diff::workdir: actually test the buffers](https://github.com/libgit2/libgit2/pull/5529) +- [Handle unreadable configuration files](https://github.com/libgit2/libgit2/pull/5527) +- [Make git_index_write() generate valid v4 index](https://github.com/libgit2/libgit2/pull/5533) +- [OpenSSL certificate memory leak](https://github.com/libgit2/libgit2/pull/5522) +- [tests: checkout: fix flaky test due to mtime race](https://github.com/libgit2/libgit2/pull/5515) +- [cmake: Sort source files for reproducible builds](https://github.com/libgit2/libgit2/pull/5523) +- [futils: fix order of declared parameters for `git_futils_fake_symlink`](https://github.com/libgit2/libgit2/pull/5517) +- [Check the version in package.json](https://github.com/libgit2/libgit2/pull/5516) +- [tests: merge: fix printf formatter on 32 bit arches](https://github.com/libgit2/libgit2/pull/5513) +- [Update package.json](https://github.com/libgit2/libgit2/pull/5511) +- [Introduce GIT_ASSERT macros](https://github.com/libgit2/libgit2/pull/5509) +- [README.md: Add instructions for building in MinGW 
environment](https://github.com/libgit2/libgit2/pull/5512) +- [Fix uninitialized stack memory and NULL ptr dereference in stash_to_index](https://github.com/libgit2/libgit2/pull/5510) +- [Honor GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH for all checkout types ](https://github.com/libgit2/libgit2/pull/5378) +- [docs: add documentation for our coding style](https://github.com/libgit2/libgit2/pull/5482) +- [MSVC: Enable Control Flow Guard (CFG)](https://github.com/libgit2/libgit2/pull/5500) +- [git__hexdump: better mimic `hexdump -C`](https://github.com/libgit2/libgit2/pull/5431) +- [Feature: Allow blame to ignore whitespace change](https://github.com/libgit2/libgit2/pull/5383) +- [deps: ntlmclient: use htobe64 on NetBSD too](https://github.com/libgit2/libgit2/pull/5487) +- [sysdir: remove unused git_sysdir_get_str](https://github.com/libgit2/libgit2/pull/5485) +- [Fix typo causing removal of symbol 'git_worktree_prune_init_options'](https://github.com/libgit2/libgit2/pull/5483) +- [pack: Improve error handling for get_delta_base()](https://github.com/libgit2/libgit2/pull/5425) +- [repo::open: ensure we can open the repository](https://github.com/libgit2/libgit2/pull/5480) +- [examples: additions and fixes](https://github.com/libgit2/libgit2/pull/5421) +- [merge: cache negative cache results for similarity metrics](https://github.com/libgit2/libgit2/pull/5477) +- [Handle repository format v1](https://github.com/libgit2/libgit2/pull/5388) +- [CMake: backend selection streamlining](https://github.com/libgit2/libgit2/pull/5440) +- [refdb_fs: remove unused header file](https://github.com/libgit2/libgit2/pull/5461) +- [patch: correctly handle mode changes for renames](https://github.com/libgit2/libgit2/pull/5466) +- [gitignore: clean up patterns from old times](https://github.com/libgit2/libgit2/pull/5474) +- [README.md: update build matrix to reflect our latest releases](https://github.com/libgit2/libgit2/pull/5478) +- [Release v1.0](https://github.com/libgit2/libgit2/pull/5471) 
+- [refdb_backend: improve callback documentation](https://github.com/libgit2/libgit2/pull/5464) +- [credentials: provide backcompat for opaque structs](https://github.com/libgit2/libgit2/pull/5465) +- [Fix segfault when calling git_blame_buffer()](https://github.com/libgit2/libgit2/pull/5445) +- [Fix spelling error](https://github.com/libgit2/libgit2/pull/5463) +- [refdb_fs: initialize backend version](https://github.com/libgit2/libgit2/pull/5456) +- [repository: improve commondir docs](https://github.com/libgit2/libgit2/pull/5444) +- [cmake: use install directories provided via GNUInstallDirs](https://github.com/libgit2/libgit2/pull/5455) +- [azure: fix errors due to curl and removal of old VM images](https://github.com/libgit2/libgit2/pull/5451) +- [win32: don't canonicalize relative paths](https://github.com/libgit2/libgit2/pull/5435) +- [CMake booleans](https://github.com/libgit2/libgit2/pull/5422) +- [Set proper pkg-config dependency for pcre2](https://github.com/libgit2/libgit2/pull/5439) +- [httpclient: use a 16kb read buffer for macOS](https://github.com/libgit2/libgit2/pull/5432) +- [ci: provide globalsign certs for bionic](https://github.com/libgit2/libgit2/pull/5437) +- [deps: ntlmclient: fix htonll on big endian FreeBSD](https://github.com/libgit2/libgit2/pull/5426) +- [azure-pipelines: download GlobalSign's certificate manually](https://github.com/libgit2/libgit2/pull/5433) +- [deps: ntlmclient: fix missing htonll symbols on FreeBSD and SunOS](https://github.com/libgit2/libgit2/pull/5417) +- [README: add language binding link to wasm-git](https://github.com/libgit2/libgit2/pull/5420) +- [Fix #5410: fix installing libgit2.pc in wrong location](https://github.com/libgit2/libgit2/pull/5412) +- [Fix typo on GIT_USE_NEC](https://github.com/libgit2/libgit2/pull/5413) +- [tests: diff: verify that we are able to diff with empty subtrees](https://github.com/libgit2/libgit2/pull/5374) +- [README: update our build matrix to reflect current 
releases](https://github.com/libgit2/libgit2/pull/5408) +- [azure: docker: set up HOME variable to fix Coverity builds](https://github.com/libgit2/libgit2/pull/5409) +- [sha1_lookup: inline its only function into "pack.c"](https://github.com/libgit2/libgit2/pull/5390) +- [Coverity fixes](https://github.com/libgit2/libgit2/pull/5391) +- [Release 0.99](https://github.com/libgit2/libgit2/pull/5291) +- [Release script](https://github.com/libgit2/libgit2/pull/5372) +- [azure: fix ARM32 builds by replacing gosu(1)](https://github.com/libgit2/libgit2/pull/5406) +- [openssl: fix Valgrind issues in nightly builds](https://github.com/libgit2/libgit2/pull/5398) +- [fuzzers: Fix the documentation](https://github.com/libgit2/libgit2/pull/5400) +- [azure: fix misleading messages printed to stderr being](https://github.com/libgit2/libgit2/pull/5392) +- [tests: iterator: fix iterator expecting too few items](https://github.com/libgit2/libgit2/pull/5393) +- [transports: http: fix custom headers not being applied](https://github.com/libgit2/libgit2/pull/5387) +- [azure: fix Coverity pipeline](https://github.com/libgit2/libgit2/pull/5382) +- [azure: tests: re-run flaky proxy tests](https://github.com/libgit2/libgit2/pull/5381) +- [fetchhead: strip credentials from remote URL](https://github.com/libgit2/libgit2/pull/5373) +- [azure-pipelines: properly expand negotiate passwords](https://github.com/libgit2/libgit2/pull/5375) +- [cred: change enum to git_credential_t and GIT_CREDENTIAL_*](https://github.com/libgit2/libgit2/pull/5336) +- [Update link to libgit2 Julia language binding](https://github.com/libgit2/libgit2/pull/5371) +- [Return int from non-free functions](https://github.com/libgit2/libgit2/pull/5365) +- [HTTP: Support Apache-based servers with Negotiate](https://github.com/libgit2/libgit2/pull/5286) +- [internal types: change enums from `type_t` to `_t`](https://github.com/libgit2/libgit2/pull/5364) +- [merge: Return non-const git_repository from accessor 
method](https://github.com/libgit2/libgit2/pull/5358) +- [Do not return free'd git_repository object on error](https://github.com/libgit2/libgit2/pull/5361) +- [refs: refuse to delete HEAD](https://github.com/libgit2/libgit2/pull/5360) +- [index: replace map macros with inline functions](https://github.com/libgit2/libgit2/pull/5351) +- [Make type mismatch errors consistent](https://github.com/libgit2/libgit2/pull/5359) +- [win32: fix relative symlinks pointing into dirs](https://github.com/libgit2/libgit2/pull/5355) +- [ntlm: prevent (spurious) compiler warnings](https://github.com/libgit2/libgit2/pull/5354) +- [Adds support for multiple SSH auth mechanisms being used sequentially](https://github.com/libgit2/libgit2/pull/5305) +- [netops: handle intact query parameters in service_suffix removal](https://github.com/libgit2/libgit2/pull/5339) +- [Refactor packfile code to use zstream abstraction](https://github.com/libgit2/libgit2/pull/5340) +- [Fix git_submodule_sync with relative url](https://github.com/libgit2/libgit2/pull/5322) +- [http: avoid generating double slashes in url](https://github.com/libgit2/libgit2/pull/5325) +- [Correct typo in name of referenced parameter](https://github.com/libgit2/libgit2/pull/5348) +- [patch_parse: fix undefined behaviour due to arithmetic on NULL pointers](https://github.com/libgit2/libgit2/pull/5338) +- [smart_pkt: fix overflow resulting in OOB read/write of one byte](https://github.com/libgit2/libgit2/pull/5337) +- [branch: clarify documentation around branches](https://github.com/libgit2/libgit2/pull/5300) +- [examples: checkout: implement guess heuristic for remote branches](https://github.com/libgit2/libgit2/pull/5283) +- [Minor doc improvements](https://github.com/libgit2/libgit2/pull/5320) +- [attr: Update definition of binary macro](https://github.com/libgit2/libgit2/pull/5333) +- [Security fixes for master](https://github.com/libgit2/libgit2/pull/5331) +- [release.md: note that we do two security 
releases](https://github.com/libgit2/libgit2/pull/5318) +- [MSVC: Fix warning C4133 on x64: "function": Incompatible types - from "unsigned long *" to "size_t *"](https://github.com/libgit2/libgit2/pull/5317) +- [ci: only push docs from the libgit2/libgit2 repo](https://github.com/libgit2/libgit2/pull/5316) +- [global: convert to fiber-local storage to fix exit races](https://github.com/libgit2/libgit2/pull/5314) +- [Fix copy&paste in git_cherrypick_commit docstring](https://github.com/libgit2/libgit2/pull/5315) +- [patch_parse: fix out-of-bounds reads caused by integer underflow](https://github.com/libgit2/libgit2/pull/5312) +- [tests: fix compiler warning if tracing is disabled](https://github.com/libgit2/libgit2/pull/5311) +- [tests: config: only test parsing huge file with GITTEST_INVASIVE_SPEED](https://github.com/libgit2/libgit2/pull/5313) +- [diff: complete support for git patchid](https://github.com/libgit2/libgit2/pull/5306) +- [Memory optimizations for config entries](https://github.com/libgit2/libgit2/pull/5243) +- [ssh: include sha256 host key hash when supported](https://github.com/libgit2/libgit2/pull/5307) +- [Various examples shape-ups](https://github.com/libgit2/libgit2/pull/5272) +- [Improve trace support in tests](https://github.com/libgit2/libgit2/pull/5309) +- [Move `git_off_t` to `git_object_size_t`](https://github.com/libgit2/libgit2/pull/5123) +- [Add compat typdef for git_attr_t](https://github.com/libgit2/libgit2/pull/5310) +- [CI Build Updates](https://github.com/libgit2/libgit2/pull/5308) +- [patch_parse: use paths from "---"/"+++" lines for binary patches](https://github.com/libgit2/libgit2/pull/5303) +- [Follow 308 redirect in WinHTTP transport](https://github.com/libgit2/libgit2/pull/5285) +- [fileops: correct error return on p_lstat failures when mkdir](https://github.com/libgit2/libgit2/pull/5302) +- [config_mem: implement support for snapshots](https://github.com/libgit2/libgit2/pull/5299) +- [patch_parse: fix segfault when header 
path contains whitespace only](https://github.com/libgit2/libgit2/pull/5298) +- [config_file: fix race when creating an iterator](https://github.com/libgit2/libgit2/pull/5282) +- [Fix crash if snapshotting a config_snapshot](https://github.com/libgit2/libgit2/pull/5293) +- [fix a bug introduced in 8a23597b](https://github.com/libgit2/libgit2/pull/5295) +- [reflogs: fix behaviour around reflogs with newlines](https://github.com/libgit2/libgit2/pull/5275) +- [commit: verify objects exist in git_commit_with_signature](https://github.com/libgit2/libgit2/pull/5289) +- [patch_parse: fixes for fuzzing errors](https://github.com/libgit2/libgit2/pull/5276) +- [apply: add GIT_APPLY_CHECK](https://github.com/libgit2/libgit2/pull/5227) +- [refs: unlock unmodified refs on transaction commit](https://github.com/libgit2/libgit2/pull/5264) +- [fuzzers: add a new fuzzer for patch parsing](https://github.com/libgit2/libgit2/pull/5269) +- [patch_parse: handle patches without extended headers](https://github.com/libgit2/libgit2/pull/5273) +- [Provide a wrapper for simple submodule clone steps](https://github.com/libgit2/libgit2/pull/4637) +- [macOS GSS Support](https://github.com/libgit2/libgit2/pull/5238) +- [cmake: correct the link stanza for CoreFoundation](https://github.com/libgit2/libgit2/pull/5265) +- [Fix file locking on POSIX OS](https://github.com/libgit2/libgit2/pull/5257) +- [cmake: update minimum CMake version to v3.5.1](https://github.com/libgit2/libgit2/pull/5260) +- [patch_parse: handle patches with new empty files](https://github.com/libgit2/libgit2/pull/5248) +- [DRY commit parsing](https://github.com/libgit2/libgit2/pull/4445) +- [azure: avoid building and testing in Docker as root](https://github.com/libgit2/libgit2/pull/5239) +- [regexp: implement a new regular expression API](https://github.com/libgit2/libgit2/pull/5226) +- [git_refdb API fixes](https://github.com/libgit2/libgit2/pull/5106) +- [Don't use enum for 
flags](https://github.com/libgit2/libgit2/pull/5242) +- [valgrind: suppress memory leaks in libssh2_session_handshake](https://github.com/libgit2/libgit2/pull/5240) +- [buffer: fix writes into out-of-memory buffers](https://github.com/libgit2/libgit2/pull/5232) +- [cred: add missing private header in GSSAPI block](https://github.com/libgit2/libgit2/pull/5237) +- [CMake pkg-config modulification](https://github.com/libgit2/libgit2/pull/5206) +- [Update chat resources in README.md](https://github.com/libgit2/libgit2/pull/5229) +- [Circular header splitting](https://github.com/libgit2/libgit2/pull/5223) + +## v0.27.0 [(2020-07-28)](https://github.com/nodegit/nodegit/releases/tag/v0.27.0) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.5...v0.27.0) + +#### Summary of changes +- Expose git_remote_rename +- Bump OpenSSL from 1.1.0i -> 1.1.1c in Windows/Mac OS Electron builds +- Replace unmaintained request library with got +- Remove promisify-node and use vanilla promises for all NodeGit promises +- Prebuilds for Node 14, deprecate Node 8 +- Persist RemoteCallbacks and ProxyOptions on the remote if using Remote.prototype.connect. This fixes a segfault when using any routines on a connected remote. 
+ +#### Merged PRs into NodeGit +- [Upgrade build environments #1785](https://github.com/nodegit/nodegit/pull/1785) +- [Remote needs to persist the callback/proxyOpts/headers #1784](https://github.com/nodegit/nodegit/pull/1784) +- [Remove promisify-node and remove old callback api remnants](https://github.com/nodegit/nodegit/pull/1772) +- [Replace deprecated package request with got](https://github.com/nodegit/nodegit/pull/1771) +- [Bump OpenSSL prebuilt to 1.1.1c](https://github.com/nodegit/nodegit/pull/1770) +- [Expose git_remote_rename](https://github.com/nodegit/nodegit/pull/1767) +- [Dedupe Remote.prototype.fetch](https://github.com/nodegit/nodegit/pull/1766) + +## v0.27.0-alpha.1 [(2020-03-26)](https://github.com/nodegit/nodegit/releases/tag/v0.27.0-alpha.1) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.5...v0.27.0-alpha.1) + +#### Summary of changes +- Expose git_remote_rename +- Bump OpenSSL from 1.1.0i -> 1.1.1c in Windows/Mac OS Electron builds +- Replace unmaintained request library with got +- Remove promisify-node and use vanilla promises for all NodeGit promises + +#### Merged PRs into NodeGit +- [Remove promisify-node and remove old callback api remnants](https://github.com/nodegit/nodegit/pull/1772) +- [Replace deprecated package request with got](https://github.com/nodegit/nodegit/pull/1771) +- [Bump OpenSSL prebuilt to 1.1.1c](https://github.com/nodegit/nodegit/pull/1770) +- [Expose git_remote_rename](https://github.com/nodegit/nodegit/pull/1767) +- [Dedupe Remote.prototype.fetch](https://github.com/nodegit/nodegit/pull/1766) + + +## v0.26.5 [(2020-02-27)](https://github.com/nodegit/nodegit/releases/tag/v0.26.5) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.4...v0.26.5) + +#### Summary of changes +- Bring in improvement to client certificate handling on Windows from [winhttp: support optional client cert #5384](https://github.com/libgit2/libgit2/pull/5384) +- `Commit.prototype.parent()` now correctly 
assigns the repo property on the retrieved commit. This should solve certain bugs when working with a commit retrieved from `parent`. + +#### Merged PRs into NodeGit +- [Bring in Libgit2 #5384 to NodeGit](https://github.com/nodegit/nodegit/pull/1758) +- [Fix behavior of Commit#parent](https://github.com/nodegit/nodegit/pull/1509) +- [Remove DiffList](https://github.com/nodegit/nodegit/pull/1733) +- [Remove unnecessary assignment of Commit#repo](https://github.com/nodegit/nodegit/pull/1508) + +#### Merged PRs into LibGit2 +- [winhttp: support optional client cert #5384](https://github.com/libgit2/libgit2/pull/5384) +- [Support `core.longpaths` on Windows #5347](https://github.com/libgit2/libgit2/pull/5347) +- [Parallelize checkout_create_the_new for perf #4205](https://github.com/libgit2/libgit2/pull/4205) + + +## v0.26.4 [(2020-01-14)](https://github.com/nodegit/nodegit/releases/tag/v0.26.4) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.3...v0.26.4) + +#### Summary of changes +- Bumped LibGit2 + - Now can be configured to support longpaths on Windows. Does not respect the config value, but is configured through `NodeGit.Libgit2.opts`. See [#1748](https://github.com/nodegit/nodegit/pull/1748) for details. 
+ - Support for complex SSH auth creds handshakes +- Pulled in patch for Libssh2 that covers an integer overflow, see [Libssh2#402](https://github.com/libssh2/libssh2/pull/402) + +#### Merged PRs into NodeGit +- [Fix some issues from the libgit2 bump](https://github.com/nodegit/nodegit/pull/1751) +- [Add option to support longpaths on Windows](https://github.com/nodegit/nodegit/pull/1748) +- [Bring in libssh2#402](https://github.com/nodegit/nodegit/pull/1749) +- [Wait for copy and remove promises to finish](https://github.com/nodegit/nodegit/pull/1730) + +#### Merged PRs into LibGit2 +- [Support `core.longpaths` on Windows #5347](https://github.com/libgit2/libgit2/pull/5347) +- [Parallelize checkout_create_the_new for perf #4205](https://github.com/libgit2/libgit2/pull/4205) +- [win32: fix relative symlinks pointing into dirs](https://github.com/libgit2/libgit2/pull/5355) +- [ntlm: prevent (spurious) compiler warnings](https://github.com/libgit2/libgit2/pull/5354) +- [Adds support for multiple SSH auth mechanisms being used sequentially](https://github.com/libgit2/libgit2/pull/5305) +- [netops: handle intact query parameters in service_suffix removal](https://github.com/libgit2/libgit2/pull/5339) +- [Refactor packfile code to use zstream abstraction](https://github.com/libgit2/libgit2/pull/5340) +- [Fix git_submodule_sync with relative url](https://github.com/libgit2/libgit2/pull/5322) +- [http: avoid generating double slashes in url](https://github.com/libgit2/libgit2/pull/5325) +- [Correct typo in name of referenced parameter](https://github.com/libgit2/libgit2/pull/5348) +- [patch_parse: fix undefined behaviour due to arithmetic on NULL pointers](https://github.com/libgit2/libgit2/pull/5338) +- [smart_pkt: fix overflow resulting in OOB read/write of one byte](https://github.com/libgit2/libgit2/pull/5337) +- [branch: clarify documentation around branches](https://github.com/libgit2/libgit2/pull/5300) +- [examples: checkout: implement guess heuristic for remote 
branches](https://github.com/libgit2/libgit2/pull/5283) +- [Minor doc improvements](https://github.com/libgit2/libgit2/pull/5320) +- [attr: Update definition of binary macro](https://github.com/libgit2/libgit2/pull/5333) +- [Security fixes for master](https://github.com/libgit2/libgit2/pull/5331) +- [release.md: note that we do two security releases](https://github.com/libgit2/libgit2/pull/5318) +- [MSVC: Fix warning C4133 on x64: "function": Incompatible types - from "unsigned long *" to "size_t *"](https://github.com/libgit2/libgit2/pull/5317) +- [ci: only push docs from the libgit2/libgit2 repo](https://github.com/libgit2/libgit2/pull/5316) +- [global: convert to fiber-local storage to fix exit races](https://github.com/libgit2/libgit2/pull/5314) +- [Fix copy&paste in git_cherrypick_commit docstring](https://github.com/libgit2/libgit2/pull/5315) +- [patch_parse: fix out-of-bounds reads caused by integer underflow](https://github.com/libgit2/libgit2/pull/5312) +- [tests: fix compiler warning if tracing is disabled](https://github.com/libgit2/libgit2/pull/5311) +- [tests: config: only test parsing huge file with GITTEST_INVASIVE_SPEED](https://github.com/libgit2/libgit2/pull/5313) +- [diff: complete support for git patchid](https://github.com/libgit2/libgit2/pull/5306) +- [Memory optimizations for config entries](https://github.com/libgit2/libgit2/pull/5243) +- [ssh: include sha256 host key hash when supported](https://github.com/libgit2/libgit2/pull/5307) +- [Various examples shape-ups](https://github.com/libgit2/libgit2/pull/5272) +- [Improve trace support in tests](https://github.com/libgit2/libgit2/pull/5309) +- [Move `git_off_t` to `git_object_size_t`](https://github.com/libgit2/libgit2/pull/5123) +- [Add compat typdef for git_attr_t](https://github.com/libgit2/libgit2/pull/5310) +- [CI Build Updates](https://github.com/libgit2/libgit2/pull/5308) +- [patch_parse: use paths from "---"/"+++" lines for binary 
patches](https://github.com/libgit2/libgit2/pull/5303) +- [Follow 308 redirect in WinHTTP transport](https://github.com/libgit2/libgit2/pull/5285) +- [fileops: correct error return on p_lstat failures when mkdir](https://github.com/libgit2/libgit2/pull/5302) +- [config_mem: implement support for snapshots](https://github.com/libgit2/libgit2/pull/5299) +- [patch_parse: fix segfault when header path contains whitespace only](https://github.com/libgit2/libgit2/pull/5298) +- [config_file: fix race when creating an iterator](https://github.com/libgit2/libgit2/pull/5282) +- [Fix crash if snapshotting a config_snapshot](https://github.com/libgit2/libgit2/pull/5293) +- [fix a bug introduced in 8a23597b](https://github.com/libgit2/libgit2/pull/5295) +- [reflogs: fix behaviour around reflogs with newlines](https://github.com/libgit2/libgit2/pull/5275) +- [commit: verify objects exist in git_commit_with_signature](https://github.com/libgit2/libgit2/pull/5289) +- [patch_parse: fixes for fuzzing errors](https://github.com/libgit2/libgit2/pull/5276) +- [apply: add GIT_APPLY_CHECK](https://github.com/libgit2/libgit2/pull/5227) +- [refs: unlock unmodified refs on transaction commit](https://github.com/libgit2/libgit2/pull/5264) +- [fuzzers: add a new fuzzer for patch parsing](https://github.com/libgit2/libgit2/pull/5269) +- [patch_parse: handle patches without extended headers](https://github.com/libgit2/libgit2/pull/5273) +- [Provide a wrapper for simple submodule clone steps](https://github.com/libgit2/libgit2/pull/4637) +- [macOS GSS Support](https://github.com/libgit2/libgit2/pull/5238) +- [cmake: correct the link stanza for CoreFoundation](https://github.com/libgit2/libgit2/pull/5265) +- [Fix file locking on POSIX OS](https://github.com/libgit2/libgit2/pull/5257) +- [cmake: update minimum CMake version to v3.5.1](https://github.com/libgit2/libgit2/pull/5260) +- [patch_parse: handle patches with new empty files](https://github.com/libgit2/libgit2/pull/5248) +- [DRY commit 
parsing](https://github.com/libgit2/libgit2/pull/4445) +- [azure: avoid building and testing in Docker as root](https://github.com/libgit2/libgit2/pull/5239) +- [regexp: implement a new regular expression API](https://github.com/libgit2/libgit2/pull/5226) +- [git_refdb API fixes](https://github.com/libgit2/libgit2/pull/5106) +- [Don't use enum for flags](https://github.com/libgit2/libgit2/pull/5242) +- [valgrind: suppress memory leaks in libssh2_session_handshake](https://github.com/libgit2/libgit2/pull/5240) +- [buffer: fix writes into out-of-memory buffers](https://github.com/libgit2/libgit2/pull/5232) +- [cred: add missing private header in GSSAPI block](https://github.com/libgit2/libgit2/pull/5237) +- [CMake pkg-config modulification](https://github.com/libgit2/libgit2/pull/5206) +- [Update chat resources in README.md](https://github.com/libgit2/libgit2/pull/5229) +- [Circular header splitting](https://github.com/libgit2/libgit2/pull/5223) + + +## v0.26.3 [(2019-12-10)](https://github.com/nodegit/nodegit/releases/tag/v0.26.3) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.2...v0.26.3) + +#### Summary of changes +- Include LibGit2 security patch: https://github.com/libgit2/libgit2/releases/tag/v0.28.4 + +#### Merged PRs into NodeGit +- [Bring in security patches from libgit2 #1743](https://github.com/nodegit/nodegit/pull/1743) + + +## v0.26.2 [(2019-09-26)](https://github.com/nodegit/nodegit/releases/tag/v0.26.2) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.1...v0.26.2) + +#### Summary of changes +- Added options to fetch additional data (gpg signature) from LibGit2 in revWalk.prototype.commitWalk and return plain objects + - _revWalk.prototype.commitWalk(numCommits: number, { returnPlainObjects: boolean })_ + +#### Merged PRs into NodeGit +- [Optionally retrieve more data on commit walk #1728](https://github.com/nodegit/nodegit/pull/1728) + + +## v0.26.1 
[(2019-09-16)](https://github.com/nodegit/nodegit/releases/tag/v0.26.1) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.26.0...v0.26.1) + +#### Summary of changes +- Bumped LibGit2 + - Additional git ignore fixes + - Allow credentials callback to return any credential type from list of supported types + - Memory leak and allocation fixes +- updateTips has optional parameters and should convert plain objects into options structs correctly now +- Added NodeGit.Blob.prototype.filter; use it instead of the deprecated NodeGit.Blob.filteredContent. + +#### Merged PRs into NodeGit +- [Bump libgit2 to latest fork of master #1723](https://github.com/nodegit/nodegit/pull/1723) +- [updateTips: optional param and normalizeOptions #1722](https://github.com/nodegit/nodegit/pull/1722) + +#### Merged PRs into LibGit2 +- [Parallelize checkout_create_the_new for perf #4205](https://github.com/libgit2/libgit2/pull/4205) +- [azure: build Docker images as part of the pipeline](https://github.com/libgit2/libgit2/pull/5198) +- [smart: use push_glob instead of manual filtering](https://github.com/libgit2/libgit2/pull/5195) +- [ntlm: fix failure to find openssl headers](https://github.com/libgit2/libgit2/pull/5216) +- [cmake: remove extraneous logging](https://github.com/libgit2/libgit2/pull/5222) +- [open:fix memory leak when passing NULL to git_repository_open_ext](https://github.com/libgit2/libgit2/pull/5224) +- [apply: Fix a patch corruption related to EOFNL handling](https://github.com/libgit2/libgit2/pull/5209) +- [ignore: correct handling of nested rules overriding wild card unignore](https://github.com/libgit2/libgit2/pull/5210) +- [Memory allocation fixes for diff generator](https://github.com/libgit2/libgit2/pull/5214) +- [Use an HTTP scheme that supports the given credentials](https://github.com/libgit2/libgit2/pull/5212) +- [apply: git_apply_to_tree fails to apply patches that add new files](https://github.com/libgit2/libgit2/pull/5208) 
+- [Optionally read `.gitattributes` from HEAD](https://github.com/libgit2/libgit2/pull/5189) +- [config: implement "onbranch" conditional](https://github.com/libgit2/libgit2/pull/5196) +- [Fix include casing for case-sensitive filesystems.](https://github.com/libgit2/libgit2/pull/5213) +- [util: use 64 bit timer on Windows](https://github.com/libgit2/libgit2/pull/5054) +- [Memory allocation audit](https://github.com/libgit2/libgit2/pull/5200) +- [clone: don't decode URL percent encodings](https://github.com/libgit2/libgit2/pull/5187) +- [Security updates from 0.28.3](https://github.com/libgit2/libgit2/pull/5202) + + +## v0.26.0 [(2019-09-09)](https://github.com/nodegit/nodegit/releases/tag/v0.26.0) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.1...v0.26.0) + +#### Summary of changes +- Bumped libssh2 to 1.9 for security patch +- Remote.prototype.upload and Remote.prototype.updateTips should be async now + +#### Merged PRs into NodeGit +- [GitRemote upload and updateTips are async #1720](https://github.com/nodegit/nodegit/pull/1720) +- [Update libssh2 to 1.9 #1719](https://github.com/nodegit/nodegit/pull/1719) + + +## v0.25.1 [(2019-08-13)](https://github.com/nodegit/nodegit/releases/tag/v0.25.1) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0...v0.25.1) + +#### Summary of changes +Security patch for LibGit2: +- A carefully constructed commit object with a very large number + of parents may lead to potential out-of-bounds writes or + potential denial of service. + +- The ProgramData configuration file is always read for compatibility + with Git for Windows and Portable Git installations. The ProgramData + location is not necessarily writable only by administrators, so we + now ensure that the configuration file is owned by the administrator + or the current user. + +Additionally: +- Stash should run much faster now. 
+ +#### Merged PRs into LibGit2 +- [Parallelize checkout_create_the_new for perf #4205](https://github.com/libgit2/libgit2/pull/4205) +- [stash: avoid recomputing tree when committing worktree](https://github.com/libgit2/libgit2/pull/5113) +- [Variadic macros](https://github.com/libgit2/libgit2/pull/5121) +- [Add sign capability to git_rebase_commit](https://github.com/libgit2/libgit2/pull/4913) +- [remote: remove unused block of code](https://github.com/libgit2/libgit2/pull/5197) +- [Adjust printf specifiers in examples code](https://github.com/libgit2/libgit2/pull/5146) +- [config: check if we are running in a sandboxed environment](https://github.com/libgit2/libgit2/pull/5191) +- [Fix example checkout to forbid rather than require --](https://github.com/libgit2/libgit2/pull/5184) +- [editorconfig: update to match our coding style](https://github.com/libgit2/libgit2/pull/5183) +- [Compare buffers in diff example](https://github.com/libgit2/libgit2/pull/5125) +- [Include ahead_behind in the test suite](https://github.com/libgit2/libgit2/pull/5135) +- [config: separate file and snapshot backends](https://github.com/libgit2/libgit2/pull/5186) +- [object: deprecate git_object__size for removal](https://github.com/libgit2/libgit2/pull/5192) + + +## v0.25.0 [(2019-08-09)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.24.3...v0.25.0) + +#### Summary of changes +##### BREAKING +- `getRemotes` no longer returns remote names, it now returns remote objects directly. Use `getRemoteNames` to get a list of remote names. 
+- Converted Buf.prototype.set and Buf.prototype.grow from async to sync +- `Repository.prototype.continueRebase` will now throw on any error except for EAPPLIED on the first call to `Rebase.prototype.next` +- Drops support for Ubuntu 14 after EOL +- Removed access to the `diff_so_far` param in `git_diff_notify_cb` and `git_diff_progress_cb` +- Changed `FilterSource.prototype.repo` to async to prevent segfaults on filters that run during `Submodule.status` +- Changed `NodeGit.Signature.default` to async, because it actually ends up reading the config. +- Fixed bug where templates were not reporting errors for synchronous methods. It's a bit of a wide net, but in general, + it is now possible that certain sync methods in NodeGit will begin failing where they did not fail before. This is the correct + behavior. + +##### Deprecations +- Support signing commits in `Repository.prototype.mergeBranches`. The last parameter `processMergeMessageCallback` is now deprecated, but will continue to work. Use the options object instead, which will contain the `processMergeMessageCallback`, as well as the `signingCb`. + +##### New +- Support for Node 12 +- Add signing support for commits and annotated tags + - Enforced consistent use of signing callbacks within the application. Any object that implements the signingCallback + pattern for signing commits or tags should use the exact same callback type and with the same meaning. + `type SigningCallback = (content: string) => {| code: number, field?: string, signedData?: string |};` + If the code is `NodeGit.Error.CODE.OK` or 0, the operation will succeed and _at least_ signedData is expected to be filled out. + If the code is a negative number, except for `NodeGit.Error.CODE.PASSTHROUGH`, the signing operation will fail. + If the code is `NodeGit.Error.CODE.PASSTHROUGH`, the operation will continue without signing the object. 
+- Exposed `AnnotatedCommit` methods: + - `AnnotatedCommit.prototype.ref` +- Exposed `Apply` methods: + - `Apply.apply` applies a diff to the repository + - `Apply.toTree` applies a diff to a tree +- Exposed `Config` methods: + - `Config.prototype.deleteEntry` + - `Config.prototype.deleteMultivar` + - `Config.prototype.getBool` + - `Config.prototype.getInt32` + - `Config.prototype.getInt64` + - `Config.prototype.setMultivar` + - `Config.prototype.snapshot` +- Exposed `ConfigIterator` with methods: + - `ConfigIterator.create` + - `ConfigIterator.createGlob` + - `ConfigIterator.createMultivar` + - `ConfigIterator.prototype.next` +- Exposed `IndexNameEntry`: + - `IndexNameEntry.add` + - `IndexNameEntry.clear` + - `IndexNameEntry.entryCount` + - `IndexNameEntry.getByIndex` + - `IndexNameEntry.prototype.ancestor` + - `IndexNameEntry.prototype.ours` + - `IndexNameEntry.prototype.theirs` +- Exposed `IndexReucEntry`: + - `IndexReucEntry.add` + - `IndexReucEntry.clear` + - `IndexReucEntry.entryCount` + - `IndexReucEntry.find` + - `IndexReucEntry.getByIndex` + - `IndexReucEntry.getByPath` + - `IndexReucEntry.remove` + - `IndexReucEntry.prototype.mode` + - `IndexReucEntry.prototype.oid` + - `IndexReucEntry.prototype.path` +- Exposed `Mailmap`: + - `Mailmap.prototype.addEntry` + - `Mailmap.fromBuffer` + - `Mailmap.fromRepository` + - `Mailmap.create` + - `Mailmap.prototype.resolve` + - `Mailmap.prototype.resolveSignature` +- Exposed `Merge` methods: + - `Merge.analysis` + - `Merge.analysisForRef` +- Exposed `Path.isGitfile` +- Added `RebaseOptions` to `Repository.prototype.rebaseContinue` +- Added `NodeGit.Reference.updateTerminal` +- Exposed `Remote` methods: + - `Remote.createWithOpts` +- Exposed `Tag.createFromBuffer` +- Expose `Tree.prototype.createUpdated(repo, numUpdates, updates)` + +##### Fixed +- Updates lodash dependency to address security notice +- Fixed a prototype problem with cherrypick, merge, and other collections that have a function at their root. 
call, apply, and bind should now be on NodeGit.Cherrypick. +- Bumped libssh2 to resolve security notice. +- Improve speed and correctness of fileHistoryWalk. The API should not have changed; however, when the end of the walk has been reached, `reachedEndOfHistory` will be specified on the resulting array. +- Fixes openssl prebuilt downloads for electron builds +- Fixes commits retrieved from `Commit.prototype.parent` +- Bump Node-Gyp to 4.0.0 to fix tar security vulnerability +- Optimized a set of routines in NodeGit. These methods as written in JavaScript require hundreds or thousands of requests to async workers to retrieve data. We've batched these requests and performed them on a single async worker. There are now native implementations of the following: + - `Repository.prototype.getReferences`: Retrieves all references on an async worker. + - `Repository.prototype.getRemotes`: Retrieves all remotes on an async worker. + - `Repository.prototype.getSubmodules`: Retrieves all submodules on an async worker. + - `Repository.prototype.refreshReferences`: Open sourced function from GitKraken. Grabs a lot of information about references on an async worker. + - `Revwalk.prototype.commitWalk`: Retrieves up to N commits from a revwalk on an async worker. 
+- When installing on a machine that has yarn and does not have npm, the preinstall script should succeed now +- `ceiling_dirs` is now an optional parameter to `Repository.discover` +- Added support for building on IBM i (PASE) machines +- Fixed leak where struct/option types were leaking libgit2 pointers +- Switched `NodeGit.Oid.fromString`'s internal implementation from `git_oid_fromstr` to `git_oid_fromstrp` +- Fixed builds for Electron 4 +- Updated `Signature.prototype.toString` to optionally include timestamps + +##### LibGit2 Bump +- Fixes gitignore issue with pattern negation +- `Remote.list` now gets the correct list of remotes if remotes are changed by external process +- Always use builtin regex for linux for portability +- Use Iconv on OSX for better internationalization support. +- Removed LibCurl from LibGit2: + - Now with built-in NTLM proxy support + - Now with built-in Negotiate/Kerberos proxy support + - Working with proxy URLs may be different as curl could auto detect scheme for proxies +- Various git config fixes +- Various git ignore fixes +- Various libgit2 performance improvements +- Windows/Linux now use PCRE for regex, OSX uses regcomp_l, this should address collation issues in diffing + +#### Merged PRs into NodeGit +- [Add deprecation warnings for enums that need them. 
#1711](https://github.com/nodegit/nodegit/pull/1711) +- [https://github.com/nodegit/nodegit/pull/1706](https://github.com/nodegit/nodegit/pull/1706) +- [Reintroduce Odb.prototype.addDiskAlternate #1695](https://github.com/nodegit/nodegit/pull/1695) +- [Fix behaviour of Repository#getReferences #1708](https://github.com/nodegit/nodegit/pull/1708) +- [Bump libgit2 #1705](https://github.com/nodegit/nodegit/pull/1705) +- [Fix Tree#createUpdated #1704](https://github.com/nodegit/nodegit/pull/1704) +- [Fix failing tests on CI #1703](https://github.com/nodegit/nodegit/pull/1703) +- [Audit lodash and fix package-lock.json #1702](https://github.com/nodegit/nodegit/pull/1702) +- [Implement support for Node 12 #1696](https://github.com/nodegit/nodegit/pull/1696) +- [Remove NSEC #1699](https://github.com/nodegit/nodegit/pull/1699) +- [Use builtin regex library for linux for better portability #1693](https://github.com/nodegit/nodegit/pull/1693) +- [Remove pcre-config from binding.gyp #1694](https://github.com/nodegit/nodegit/pull/1694) +- [refresh_references.cc: skip refs that can't be directly resolved #1689](https://github.com/nodegit/nodegit/pull/1689) +- [Bump libgit2 to fork of latest master #1690](https://github.com/nodegit/nodegit/pull/1690) +- [Bump libssh2 to 1.8.2 and fix some npm audit warnings #1678](https://github.com/nodegit/nodegit/pull/1678) +- [Root functions should keep their function prototypes correctly #1681](https://github.com/nodegit/nodegit/pull/1681) +- [refresh_references.cc: bust LibGit2 remote list cache by reading config #1685](https://github.com/nodegit/nodegit/pull/1685) +- [Implement faster file history walk #1676](https://github.com/nodegit/nodegit/pull/1676) +- [EOL for Node 6 and Ubuntu 14.04 #1649](https://github.com/nodegit/nodegit/pull/1649) +- [Ensures that commits from parent(*) has a repository #1658](https://github.com/nodegit/nodegit/pull/1658) +- [Update openssl conan distributions #1663](https://github.com/nodegit/nodegit/pull/1663) 
+- [Support signing in Repository#mergeBranches #1664](https://github.com/nodegit/nodegit/pull/1664) +- [Dependency upgrade node-gyp upgraded to 4.0.0 #1672](https://github.com/nodegit/nodegit/pull/1672) +- [Add additional getters to streamline information gathering (breaking change) #1671](https://github.com/nodegit/nodegit/pull/1671) +- [Clean up some dangerous memory accesses in callbacks #1642](https://github.com/nodegit/nodegit/pull/1642) +- [Output the item that was deprecated when giving deprecation notice #1643](https://github.com/nodegit/nodegit/pull/1643) +- [Don't fail yarn installs when we can't find npm #1644](https://github.com/nodegit/nodegit/pull/1644) +- [`ceiling_dirs` parameter in `Repository.discover` is optional #1245](https://github.com/nodegit/nodegit/pull/1245) +- [Add missing `shouldAlloc` declarations for git_merge_analysis* functions #1641](https://github.com/nodegit/nodegit/pull/1641) +- [Fix regex state causing subsequent runs of Tag.extractSignature to fail #1630](https://github.com/nodegit/nodegit/pull/1630) +- [Update LibGit2 docs to v0.28.0 #1631](https://github.com/nodegit/nodegit/pull/1631) +- [Add support for building on IBM i (PASE) #1634](https://github.com/nodegit/nodegit/pull/1634) +- [Expose more config methods #1635](https://github.com/nodegit/nodegit/pull/1635) +- [Catch errors and pass them to libgit2 as error codes in rebase signingcb #1636](https://github.com/nodegit/nodegit/pull/1636) +- [Simplify check for IBM i operating system #1637](https://github.com/nodegit/nodegit/pull/1637) +- [Bump LibGit2 to fork of v0.28.1 #1638](https://github.com/nodegit/nodegit/pull/1638) +- [We should clear the persistent cell in structs when they are destroyed #1629](https://github.com/nodegit/nodegit/pull/1629) +- [Fix "errorno" typo #1628](https://github.com/nodegit/nodegit/pull/1628) +- [Bump Libgit2 fork to v0.28.0 #1627](https://github.com/nodegit/nodegit/pull/1627) +- [Fix macOS and Windows Electron 4 builds 
#1626](https://github.com/nodegit/nodegit/pull/1626) +- [Fix non-existent / dangling refs cause Repository.prototype.createCommitWithSignature to fail #1624](https://github.com/nodegit/nodegit/pull/1624) +- [Handle new gyp information for electron builds #1623](https://github.com/nodegit/nodegit/pull/1623) +- [Use same API for signingCb in all places that can be crypto signed #1621](https://github.com/nodegit/nodegit/pull/1621) +- [Breaking: Repository.prototype.continueRebase enhancements #1619](https://github.com/nodegit/nodegit/pull/1619) +- [adds support for gpg commit signing (fixes #1018) #1448](https://github.com/nodegit/nodegit/pull/1448) +- [Add `updateRef` parameter to Repository#createCommitWithSignature #1610](https://github.com/nodegit/nodegit/pull/1610) +- [Documentation fixes. #1611](https://github.com/nodegit/nodegit/pull/1611) +- [Add Commit#amendWithSignature #1616](https://github.com/nodegit/nodegit/pull/1616) +- [Bump libgit2 to a preview of v0.28 #1615](https://github.com/nodegit/nodegit/pull/1615) +- [Fix issues with Commit#amendWithSignature #1617](https://github.com/nodegit/nodegit/pull/1617) +- [Marked Repository.createBlobFromBuffer as async #1614](https://github.com/nodegit/nodegit/pull/1614) +- [Add functionality for creating Tags with signatures and extracting signatures from Tags #1618](https://github.com/nodegit/nodegit/pull/1618) + +#### Merged PRs into LibGit2 +- [Add sign capability to git_rebase_commit #4913](https://github.com/libgit2/libgit2/pull/4913) +- [Parallelize checkout_create_the_new for perf #4205](https://github.com/libgit2/libgit2/pull/4205) +- [config_file: refresh when creating an iterator](https://github.com/libgit2/libgit2/pull/5181) +- [azure: drop powershell](https://github.com/libgit2/libgit2/pull/5141) +- [fuzzer: use futils instead of fileops](https://github.com/libgit2/libgit2/pull/5180) +- [w32: fix unlinking of directory symlinks](https://github.com/libgit2/libgit2/pull/5151) +- [patch_parse: fix segfault 
due to line containing static contents](https://github.com/libgit2/libgit2/pull/5179) +- [ignore: fix determining whether a shorter pattern negates another](https://github.com/libgit2/libgit2/pull/5173) +- [patch_parse: handle missing newline indicator in old file](https://github.com/libgit2/libgit2/pull/5159) +- [patch_parse: do not depend on parsed buffer's lifetime](https://github.com/libgit2/libgit2/pull/5158) +- [sha1: fix compilation of WinHTTP backend](https://github.com/libgit2/libgit2/pull/5174) +- [repository: do not initialize HEAD if it's provided by templates](https://github.com/libgit2/libgit2/pull/5176) +- [configuration: cvar -> configmap](https://github.com/libgit2/libgit2/pull/5138) +- [Evict cache items more efficiently](https://github.com/libgit2/libgit2/pull/5172) +- [clar: fix suite count](https://github.com/libgit2/libgit2/pull/5175) +- [Ignore VS2017 specific files and folders](https://github.com/libgit2/libgit2/pull/5163) +- [gitattributes: ignore macros defined in subdirectories](https://github.com/libgit2/libgit2/pull/5156) +- [clar: correctly account for "data" suites when counting](https://github.com/libgit2/libgit2/pull/5168) +- [Allocate memory more efficiently when packing objects](https://github.com/libgit2/libgit2/pull/5170) +- [fileops: fix creation of directory in filesystem root](https://github.com/libgit2/libgit2/pull/5131) +- [win32: fix fuzzers and have CI build them](https://github.com/libgit2/libgit2/pull/5160) +- [Config parser separation](https://github.com/libgit2/libgit2/pull/5134) +- [config_file: implement stat cache to avoid repeated rehashing](https://github.com/libgit2/libgit2/pull/5132) +- [ci: build with ENABLE_WERROR on Windows](https://github.com/libgit2/libgit2/pull/5143) +- [Fix Regression: attr: Correctly load system attr file (on Windows)](https://github.com/libgit2/libgit2/pull/5152) +- [hash: fix missing error return on production builds](https://github.com/libgit2/libgit2/pull/5145) +- [Resolve static 
check warnings in example code](https://github.com/libgit2/libgit2/pull/5142) +- [Multiple hash algorithms](https://github.com/libgit2/libgit2/pull/4438) +- [More documentation](https://github.com/libgit2/libgit2/pull/5128) +- [Incomplete commondir support](https://github.com/libgit2/libgit2/pull/4967) +- [Remove warnings](https://github.com/libgit2/libgit2/pull/5078) +- [Re-run flaky tests](https://github.com/libgit2/libgit2/pull/5140) +- [errors: use lowercase](https://github.com/libgit2/libgit2/pull/5137) +- [largefile tests: only write 2GB on 32-bit platforms](https://github.com/libgit2/libgit2/pull/5136) +- [Fix broken link in README](https://github.com/libgit2/libgit2/pull/5129) +- [net: remove unused `git_headlist_cb`](https://github.com/libgit2/libgit2/pull/5122) +- [cmake: default NTLM client to off if no HTTPS support](https://github.com/libgit2/libgit2/pull/5124) +- [attr: rename constants and macros for consistency](https://github.com/libgit2/libgit2/pull/5119) +- [Change API instances of `fromnoun` to `from_noun` (with an underscore)](https://github.com/libgit2/libgit2/pull/5117) +- [object: rename git_object__size to git_object_size](https://github.com/libgit2/libgit2/pull/5118) +- [Replace fnmatch with wildmatch](https://github.com/libgit2/libgit2/pull/5110) +- [Documentation fixes](https://github.com/libgit2/libgit2/pull/5111) +- [Removal of `p_fallocate`](https://github.com/libgit2/libgit2/pull/5114) +- [Modularize our TLS & hash detection](https://github.com/libgit2/libgit2/pull/5055) +- [tests: merge::analysis: use test variants to avoid duplicated test suites](https://github.com/libgit2/libgit2/pull/5109) +- [Rename options initialization functions](https://github.com/libgit2/libgit2/pull/5101) +- [deps: ntlmclient: disable implicit fallthrough warnings](https://github.com/libgit2/libgit2/pull/5112) +- [gitignore with escapes](https://github.com/libgit2/libgit2/pull/5097) +- [Handle URLs with a colon after host but no 
port](https://github.com/libgit2/libgit2/pull/5108) +- [Merge analysis support for bare repos](https://github.com/libgit2/libgit2/pull/5022) +- [Add memleak check docs](https://github.com/libgit2/libgit2/pull/5104) +- [Data-driven tests](https://github.com/libgit2/libgit2/pull/5098) +- [sha1dc: update to fix endianess issues on AIX/HP-UX](https://github.com/libgit2/libgit2/pull/5107) +- [Add NTLM support for HTTP(s) servers and proxies](https://github.com/libgit2/libgit2/pull/5052) +- [Callback type names should be suffixed with `_cb`](https://github.com/libgit2/libgit2/pull/5102) +- [tests: checkout: fix symlink.git being created outside of sandbox](https://github.com/libgit2/libgit2/pull/5099) +- [ignore: handle escaped trailing whitespace](https://github.com/libgit2/libgit2/pull/5095) +- [Ignore: only treat one leading slash as a root identifier](https://github.com/libgit2/libgit2/pull/5074) +- [online tests: use gitlab for auth failures](https://github.com/libgit2/libgit2/pull/5094) +- [Ignore files: don't ignore whitespace](https://github.com/libgit2/libgit2/pull/5076) +- [cache: fix cache eviction using deallocated key](https://github.com/libgit2/libgit2/pull/5088) +- [SECURITY.md: split out security-relevant bits from readme](https://github.com/libgit2/libgit2/pull/5085) +- [Restore NetBSD support](https://github.com/libgit2/libgit2/pull/5086) +- [repository: fix garbage return value](https://github.com/libgit2/libgit2/pull/5084) +- [cmake: disable fallthrough warnings for PCRE](https://github.com/libgit2/libgit2/pull/5083) +- [Configuration parsing: validate section headers with quotes](https://github.com/libgit2/libgit2/pull/5073) +- [Loosen restriction on wildcard "*" refspecs](https://github.com/libgit2/libgit2/pull/5060) +- [Use PCRE for our fallback regex engine when regcomp_l is unavailable](https://github.com/libgit2/libgit2/pull/4935) +- [Remote URL last-chance resolution](https://github.com/libgit2/libgit2/pull/5062) +- [Skip UTF8 BOM in ignore 
files](https://github.com/libgit2/libgit2/pull/5075) +- [We've already added `ZLIB_LIBRARIES` to `LIBGIT2_LIBS` so don't also add the `z` library](https://github.com/libgit2/libgit2/pull/5080) +- [Define SYMBOLIC_LINK_FLAG_DIRECTORY if required](https://github.com/libgit2/libgit2/pull/5077) +- [Support symlinks for directories in win32](https://github.com/libgit2/libgit2/pull/5065) +- [rebase: orig_head and onto accessors](https://github.com/libgit2/libgit2/pull/5057) +- [cmake: correctly detect if system provides `regcomp`](https://github.com/libgit2/libgit2/pull/5063) +- [Correctly write to missing locked global config](https://github.com/libgit2/libgit2/pull/5023) +- [[RFC] util: introduce GIT_DOWNCAST macro](https://github.com/libgit2/libgit2/pull/4561) +- [examples: implement SSH authentication](https://github.com/libgit2/libgit2/pull/5051) +- [git_repository_init: stop traversing at windows root](https://github.com/libgit2/libgit2/pull/5050) +- [config_file: check result of git_array_alloc](https://github.com/libgit2/libgit2/pull/5053) +- [patch_parse.c: Handle CRLF in parse_header_start](https://github.com/libgit2/libgit2/pull/5027) +- [fix typo](https://github.com/libgit2/libgit2/pull/5045) +- [sha1: don't inline `git_hash_global_init` for win32](https://github.com/libgit2/libgit2/pull/5039) +- [ignore: treat paths with trailing "/" as directories](https://github.com/libgit2/libgit2/pull/5040) +- [Test that largefiles can be read through the tree API](https://github.com/libgit2/libgit2/pull/4874) +- [Tests for symlinked user config](https://github.com/libgit2/libgit2/pull/5034) +- [patch_parse: fix parsing addition/deletion of file with space](https://github.com/libgit2/libgit2/pull/5035) +- [Optimize string comparisons](https://github.com/libgit2/libgit2/pull/5018) +- [Negation of subdir ignore causes other subdirs to be unignored](https://github.com/libgit2/libgit2/pull/5020) +- [xdiff: fix typo](https://github.com/libgit2/libgit2/pull/5024) +- [docs: 
clarify relation of safe and forced checkout strategy](https://github.com/libgit2/libgit2/pull/5032) +- [Each hash implementation should define `git_hash_global_init`](https://github.com/libgit2/libgit2/pull/5026) +- [[Doc] Update URL to git2-rs](https://github.com/libgit2/libgit2/pull/5012) +- [remote: Rename git_remote_completion_type to _t](https://github.com/libgit2/libgit2/pull/5008) +- [odb: provide a free function for custom backends](https://github.com/libgit2/libgit2/pull/5005) +- [Have git_branch_lookup accept GIT_BRANCH_ALL](https://github.com/libgit2/libgit2/pull/5000) +- [Rename git_transfer_progress to git_indexer_progress](https://github.com/libgit2/libgit2/pull/4997) +- [High-level map APIs](https://github.com/libgit2/libgit2/pull/4901) +- [refdb_fs: fix loose/packed refs lookup racing with repacks](https://github.com/libgit2/libgit2/pull/4984) +- [Allocator restructuring](https://github.com/libgit2/libgit2/pull/4998) +- [cache: fix misnaming of `git_cache_free`](https://github.com/libgit2/libgit2/pull/4992) +- [examples: produce single cgit2 binary](https://github.com/libgit2/libgit2/pull/4956) +- [Remove public 'inttypes.h' header](https://github.com/libgit2/libgit2/pull/4991) +- [Prevent reading out of bounds memory](https://github.com/libgit2/libgit2/pull/4996) +- [Fix a memory leak in odb_otype_fast()](https://github.com/libgit2/libgit2/pull/4987) +- [Make stdalloc__reallocarray call stdalloc__realloc](https://github.com/libgit2/libgit2/pull/4986) +- [Remove `git_time_monotonic`](https://github.com/libgit2/libgit2/pull/4990) +- [Fix a _very_ improbable memory leak in git_odb_new()](https://github.com/libgit2/libgit2/pull/4988) +- [ci: publish documentation on merge](https://github.com/libgit2/libgit2/pull/4989) +- [Enable creation of worktree from bare repo's default branch](https://github.com/libgit2/libgit2/pull/4982) +- [Allow bypassing check for '.keep' file](https://github.com/libgit2/libgit2/pull/4965) +- [Deprecation: export the 
deprecated functions properly](https://github.com/libgit2/libgit2/pull/4979) +- [ci: skip ssh tests on macOS nightly](https://github.com/libgit2/libgit2/pull/4980) +- [CI build fixups](https://github.com/libgit2/libgit2/pull/4976) +- [v0.28 rc1](https://github.com/libgit2/libgit2/pull/4970) +- [Docs](https://github.com/libgit2/libgit2/pull/4968) +- [Documentation fixes](https://github.com/libgit2/libgit2/pull/4954) +- [ci: add an individual coverity pipeline](https://github.com/libgit2/libgit2/pull/4964) +- [ci: run docurium to create documentation](https://github.com/libgit2/libgit2/pull/4961) +- [ci: return coverity to the nightlies](https://github.com/libgit2/libgit2/pull/4962) +- [Clean up some warnings](https://github.com/libgit2/libgit2/pull/4950) +- [Nightlies: use `latest` docker images](https://github.com/libgit2/libgit2/pull/4869) +- [index: preserve extension parsing errors](https://github.com/libgit2/libgit2/pull/4858) +- [Deprecate functions and constants more gently](https://github.com/libgit2/libgit2/pull/4952) +- [Don't use deprecated constants](https://github.com/libgit2/libgit2/pull/4957) +- [Fix VS warning C4098: 'giterr_set_str' : void function returning a value](https://github.com/libgit2/libgit2/pull/4955) +- [Move `giterr` to `git_error`](https://github.com/libgit2/libgit2/pull/4917) +- [odb: Fix odb foreach to also close on positive error code](https://github.com/libgit2/libgit2/pull/4949) +- [repository: free memory in symlink detection function](https://github.com/libgit2/libgit2/pull/4948) +- [ci: update poxyproxy, run in quiet mode](https://github.com/libgit2/libgit2/pull/4947) +- [Add/multiply with overflow tweaks](https://github.com/libgit2/libgit2/pull/4945) +- [Improve deprecation of old enums](https://github.com/libgit2/libgit2/pull/4944) +- [Move `git_ref_t` to `git_reference_t`](https://github.com/libgit2/libgit2/pull/4939) +- [More `git_obj` to `git_object` updates](https://github.com/libgit2/libgit2/pull/4940) +- [ci: only run 
invasive tests in nightly](https://github.com/libgit2/libgit2/pull/4943) +- [Always build a cdecl library](https://github.com/libgit2/libgit2/pull/4930) +- [changelog: document changes since 0.27](https://github.com/libgit2/libgit2/pull/4932) +- [Fix a bunch of warnings](https://github.com/libgit2/libgit2/pull/4925) +- [mailmap: prefer ethomson@edwardthomson.com](https://github.com/libgit2/libgit2/pull/4941) +- [Convert tests/resources/push.sh to LF endings](https://github.com/libgit2/libgit2/pull/4937) +- [Get rid of some test files that were accidentally committed](https://github.com/libgit2/libgit2/pull/4936) +- [Fix crash on remote connection when GIT_PROXY_AUTO is set but no proxy is detected](https://github.com/libgit2/libgit2/pull/4934) +- [Make ENABLE_WERROR actually work](https://github.com/libgit2/libgit2/pull/4924) +- [Remove unconditional -Wno-deprecated-declaration on macOS](https://github.com/libgit2/libgit2/pull/4931) +- [Fix warning 'function': incompatible types - from 'git_cvar_value *' to 'int *' (C4133) on VS](https://github.com/libgit2/libgit2/pull/4926) +- [Fix Linux warnings](https://github.com/libgit2/libgit2/pull/4928) +- [Coverity fixes](https://github.com/libgit2/libgit2/pull/4922) +- [Shutdown callback count](https://github.com/libgit2/libgit2/pull/4919) +- [Update CRLF filtering to match modern git](https://github.com/libgit2/libgit2/pull/4904) +- [refdb_fs: refactor error handling in `refdb_reflog_fs__delete`](https://github.com/libgit2/libgit2/pull/4915) +- [Remove empty (sub-)directories when deleting refs](https://github.com/libgit2/libgit2/pull/4833) +- [Support creating annotated commits from annotated tags](https://github.com/libgit2/libgit2/pull/4910) +- [Fix segfault in loose_backend__readstream](https://github.com/libgit2/libgit2/pull/4906) +- [make proxy_stream_close close target stream even on errors](https://github.com/libgit2/libgit2/pull/4905) +- [Index API updates for 
consistency](https://github.com/libgit2/libgit2/pull/4807) +- [Allow merge analysis against any reference](https://github.com/libgit2/libgit2/pull/4770) +- [revwalk: Allow changing hide_cb](https://github.com/libgit2/libgit2/pull/4888) +- [Unused function warnings](https://github.com/libgit2/libgit2/pull/4895) +- [Add builtin proxy support for the http transport](https://github.com/libgit2/libgit2/pull/4870) +- [config: fix adding files if their parent directory is a file](https://github.com/libgit2/libgit2/pull/4898) +- [Allow certificate and credential callbacks to decline to act](https://github.com/libgit2/libgit2/pull/4879) +- [Fix warning C4133 incompatible types in MSVC](https://github.com/libgit2/libgit2/pull/4896) +- [index: introduce git_index_iterator](https://github.com/libgit2/libgit2/pull/4884) +- [commit: fix out-of-bound reads when parsing truncated author fields](https://github.com/libgit2/libgit2/pull/4894) +- [tests: 🌀 address two null argument instances #4847](https://github.com/libgit2/libgit2/pull/4847) +- [Some OpenSSL issues](https://github.com/libgit2/libgit2/pull/4875) +- [worktree: Expose git_worktree_add_init_options](https://github.com/libgit2/libgit2/pull/4892) +- [transport/http: Include non-default ports in Host header](https://github.com/libgit2/libgit2/pull/4882) +- [Support symlinks on Windows when core.symlinks=true](https://github.com/libgit2/libgit2/pull/4713) +- [strntol: fix out-of-bounds reads when parsing numbers with leading sign](https://github.com/libgit2/libgit2/pull/4886) +- [apply: small fixups in the test suite](https://github.com/libgit2/libgit2/pull/4885) +- [signature: fix out-of-bounds read when parsing timezone offset](https://github.com/libgit2/libgit2/pull/4883) +- [Remote creation API](https://github.com/libgit2/libgit2/pull/4667) +- [Index collision fixes](https://github.com/libgit2/libgit2/pull/4818) +- [Patch (diff) application](https://github.com/libgit2/libgit2/pull/4705) +- [smart transport: only clear 
url on hard reset (regression)](https://github.com/libgit2/libgit2/pull/4880) +- [Tree parsing fixes](https://github.com/libgit2/libgit2/pull/4871) +- [CI: Fix macOS leak detection](https://github.com/libgit2/libgit2/pull/4860) +- [README: more CI status badges](https://github.com/libgit2/libgit2/pull/4800) +- [ci: Fix some minor issues](https://github.com/libgit2/libgit2/pull/4867) +- [Object parse fixes](https://github.com/libgit2/libgit2/pull/4864) +- [Windows CI: fail build on test failure](https://github.com/libgit2/libgit2/pull/4862) +- [ci: run all the jobs during nightly builds](https://github.com/libgit2/libgit2/pull/4863) +- [strtol removal](https://github.com/libgit2/libgit2/pull/4851) +- [ buf::oom tests: use custom allocator for oom failures](https://github.com/libgit2/libgit2/pull/4854) +- [ci: arm docker builds](https://github.com/libgit2/libgit2/pull/4804) +- [Win32 path canonicalization refactoring](https://github.com/libgit2/libgit2/pull/4852) +- [Check object existence when creating a tree from an index](https://github.com/libgit2/libgit2/pull/4840) +- [Ninja build](https://github.com/libgit2/libgit2/pull/4841) +- [docs: fix transparent/opaque confusion in the conventions file](https://github.com/libgit2/libgit2/pull/4853) +- [Configuration variables can appear on the same line as the section header](https://github.com/libgit2/libgit2/pull/4819) +- [path: export the dotgit-checking functions](https://github.com/libgit2/libgit2/pull/4849) +- [cmake: correct comment from libssh to libssh2](https://github.com/libgit2/libgit2/pull/4850) +- [Object parsing fuzzer](https://github.com/libgit2/libgit2/pull/4845) +- [config: Port config_file_fuzzer to the new in-memory backend.](https://github.com/libgit2/libgit2/pull/4842) +- [Add some more tests for git_futils_rmdir_r and some cleanup](https://github.com/libgit2/libgit2/pull/4828) +- [diff_stats: use git's formatting of renames with common directories](https://github.com/libgit2/libgit2/pull/4830) +- 
[ignore unsupported http authentication contexts](https://github.com/libgit2/libgit2/pull/4839) +- [submodule: ignore path and url attributes if they look like options](https://github.com/libgit2/libgit2/pull/4837) +- [Smart packet security fixes](https://github.com/libgit2/libgit2/pull/4836) +- [config_file: properly ignore includes without "path" value](https://github.com/libgit2/libgit2/pull/4832) +- [int-conversion](https://github.com/libgit2/libgit2/pull/4831) +- [cmake: enable new quoted argument policy CMP0054](https://github.com/libgit2/libgit2/pull/4829) +- [fix check if blob is uninteresting when inserting tree to packbuilder](https://github.com/libgit2/libgit2/pull/4824) +- [Documentation fixups](https://github.com/libgit2/libgit2/pull/4827) +- [CI: refactoring](https://github.com/libgit2/libgit2/pull/4812) +- [In-memory configuration](https://github.com/libgit2/libgit2/pull/4767) +- [Some warnings](https://github.com/libgit2/libgit2/pull/4784) +- [index: release the snapshot instead of freeing the index](https://github.com/libgit2/libgit2/pull/4803) +- [online::clone: free url and username before resetting](https://github.com/libgit2/libgit2/pull/4816) +- [git_remote_prune to be O(n * logn)](https://github.com/libgit2/libgit2/pull/4794) +- [Rename "VSTS" to "Azure DevOps" and "Azure Pipelines"](https://github.com/libgit2/libgit2/pull/4813) +- [cmake: enable -Wformat and -Wformat-security](https://github.com/libgit2/libgit2/pull/4810) +- [Fix revwalk limiting regression](https://github.com/libgit2/libgit2/pull/4809) +- [path validation: `char` is not signed by default.](https://github.com/libgit2/libgit2/pull/4805) +- [revwalk: refer the sorting modes more to git's options](https://github.com/libgit2/libgit2/pull/4811) +- [Clar XML output redux](https://github.com/libgit2/libgit2/pull/4778) +- [remote: store the connection data in a private struct](https://github.com/libgit2/libgit2/pull/4785) +- [docs: clarify and include licenses of 
dependencies](https://github.com/libgit2/libgit2/pull/4789) +- [config_file: fix quadratic behaviour when adding config multivars](https://github.com/libgit2/libgit2/pull/4799) +- [config: Fix a leak parsing multi-line config entries](https://github.com/libgit2/libgit2/pull/4792) +- [Prevent heap-buffer-overflow](https://github.com/libgit2/libgit2/pull/4797) +- [ci: remove travis](https://github.com/libgit2/libgit2/pull/4790) +- [Update VSTS YAML files with the latest syntax](https://github.com/libgit2/libgit2/pull/4791) +- [Documentation fixes](https://github.com/libgit2/libgit2/pull/4788) +- [config: convert unbounded recursion into a loop](https://github.com/libgit2/libgit2/pull/4781) +- [Document giterr_last() use only after error. #4772](https://github.com/libgit2/libgit2/pull/4773) +- [util: make the qsort_r check work on macOS](https://github.com/libgit2/libgit2/pull/4765) +- [fuzzer: update for indexer changes](https://github.com/libgit2/libgit2/pull/4782) +- [tree: accept null ids in existing trees when updating](https://github.com/libgit2/libgit2/pull/4727) +- [Pack file verification](https://github.com/libgit2/libgit2/pull/4374) +- [cmake: detect and use libc-provided iconv](https://github.com/libgit2/libgit2/pull/4777) +- [Coverity flavored clang analyzer fixes](https://github.com/libgit2/libgit2/pull/4774) +- [tests: verify adding index conflicts with invalid filemodes fails](https://github.com/libgit2/libgit2/pull/4776) +- [worktree: unlock should return 1 when the worktree isn't locked](https://github.com/libgit2/libgit2/pull/4769) +- [Add a fuzzer for config files](https://github.com/libgit2/libgit2/pull/4752) +- [Fix 'invalid packet line' for ng packets containing errors](https://github.com/libgit2/libgit2/pull/4763) +- [Fix leak in index.c](https://github.com/libgit2/libgit2/pull/4768) +- [threads::diff: use separate git_repository objects](https://github.com/libgit2/libgit2/pull/4754) +- [travis: remove Coverity cron 
job](https://github.com/libgit2/libgit2/pull/4766) +- [parse: Do not initialize the content in context to NULL](https://github.com/libgit2/libgit2/pull/4749) +- [config_file: Don't crash on options without a section](https://github.com/libgit2/libgit2/pull/4750) +- [ci: Correct the status code check so Coverity doesn't force-fail Travis](https://github.com/libgit2/libgit2/pull/4764) +- [ci: remove appveyor](https://github.com/libgit2/libgit2/pull/4760) +- [diff: fix OOM on AIX when finding similar deltas in empty diff](https://github.com/libgit2/libgit2/pull/4761) +- [travis: do not execute Coverity analysis for all cron jobs](https://github.com/libgit2/libgit2/pull/4755) +- [ci: enable compilation with "-Werror"](https://github.com/libgit2/libgit2/pull/4759) +- [smart_pkt: fix potential OOB-read when processing ng packet](https://github.com/libgit2/libgit2/pull/4758) +- [Fix a double-free in config parsing](https://github.com/libgit2/libgit2/pull/4751) +- [Fuzzers](https://github.com/libgit2/libgit2/pull/4728) +- [ci: run VSTS builds on master and maint branches](https://github.com/libgit2/libgit2/pull/4746) +- [Windows: default credentials / fallback credential handling](https://github.com/libgit2/libgit2/pull/4743) +- [ci: add VSTS build badge to README](https://github.com/libgit2/libgit2/pull/4745) +- [ci: set PKG_CONFIG_PATH for travis](https://github.com/libgit2/libgit2/pull/4744) +- [CI: Refactor and introduce VSTS builds](https://github.com/libgit2/libgit2/pull/4723) +- [revwalk: remove tautologic condition for hiding a commit](https://github.com/libgit2/libgit2/pull/4742) +- [winhttp: retry erroneously failing requests](https://github.com/libgit2/libgit2/pull/4731) +- [Add a configurable limit to the max pack size that will be indexed](https://github.com/libgit2/libgit2/pull/4721) +- [mbedtls: remove unused variable "cacert"](https://github.com/libgit2/libgit2/pull/4739) +- [Squash some leaks](https://github.com/libgit2/libgit2/pull/4732) +- [Add a 
checkout example](https://github.com/libgit2/libgit2/pull/4692) +- [Assorted Coverity fixes](https://github.com/libgit2/libgit2/pull/4702) +- [Remove GIT_PKT_PACK entirely](https://github.com/libgit2/libgit2/pull/4704) +- [ ignore: improve `git_ignore_path_is_ignored` description Git analogy](https://github.com/libgit2/libgit2/pull/4722) +- [alloc: don't overwrite allocator during init if set](https://github.com/libgit2/libgit2/pull/4724) +- [C90 standard compliance](https://github.com/libgit2/libgit2/pull/4700) +- [Delta OOB access](https://github.com/libgit2/libgit2/pull/4719) +- [Release v0.27.3](https://github.com/libgit2/libgit2/pull/4717) +- [streams: report OpenSSL errors if global init fails](https://github.com/libgit2/libgit2/pull/4710) +- [patch_parse: populate line numbers while parsing diffs](https://github.com/libgit2/libgit2/pull/4687) +- [Fix git_worktree_validate failing on bare repositories](https://github.com/libgit2/libgit2/pull/4686) +- [git_refspec_transform: Handle NULL dst](https://github.com/libgit2/libgit2/pull/4699) +- [Add a "dirty" state to the index when it has unsaved changes](https://github.com/libgit2/libgit2/pull/4536) +- [refspec: rename `git_refspec__free` to `git_refspec__dispose`](https://github.com/libgit2/libgit2/pull/4709) +- [streams: openssl: Handle error in SSL_CTX_new](https://github.com/libgit2/libgit2/pull/4701) +- [refspec: add public parsing api](https://github.com/libgit2/libgit2/pull/4519) +- [Fix interaction between limited flag and sorting over resets](https://github.com/libgit2/libgit2/pull/4688) +- [deps: fix implicit fallthrough warning in http-parser](https://github.com/libgit2/libgit2/pull/4691) +- [Fix assorted leaks found via fuzzing](https://github.com/libgit2/libgit2/pull/4698) +- [Fix type confusion in git_smart__connect](https://github.com/libgit2/libgit2/pull/4695) +- [Verify ref_pkt's are long enough](https://github.com/libgit2/libgit2/pull/4696) +- [Config parser 
cleanups](https://github.com/libgit2/libgit2/pull/4411) +- [Fix last references to deprecated git_buf_free](https://github.com/libgit2/libgit2/pull/4685) +- [revwalk: avoid walking the entire history when output is unsorted](https://github.com/libgit2/libgit2/pull/4606) +- [Add mailmap support.](https://github.com/libgit2/libgit2/pull/4586) +- [tree: remove unused functions](https://github.com/libgit2/libgit2/pull/4683) +- [Link `mbedTLS` libraries in when `SHA1_BACKEND` == "mbedTLS"](https://github.com/libgit2/libgit2/pull/4678) +- [editorconfig: allow trailing whitespace in markdown](https://github.com/libgit2/libgit2/pull/4676) +- [docs: fix statement about tab width](https://github.com/libgit2/libgit2/pull/4681) +- [diff: fix enum value being out of allowed range](https://github.com/libgit2/libgit2/pull/4680) +- [pack: rename `git_packfile_stream_free`](https://github.com/libgit2/libgit2/pull/4436) +- [Stop leaking the memory](https://github.com/libgit2/libgit2/pull/4677) +- [Bugfix release v0.27.2](https://github.com/libgit2/libgit2/pull/4632) +- [Fix stash save bug with fast path index check](https://github.com/libgit2/libgit2/pull/4668) +- [path: unify `git_path_is_*` APIs](https://github.com/libgit2/libgit2/pull/4662) +- [Fix negative gitignore rules with leading directories ](https://github.com/libgit2/libgit2/pull/4670) +- [Custom memory allocators](https://github.com/libgit2/libgit2/pull/4576) +- [index: Fix alignment issues in write_disk_entry()](https://github.com/libgit2/libgit2/pull/4655) +- [travis: war on leaks](https://github.com/libgit2/libgit2/pull/4558) +- [refdb_fs: fix regression: failure when globbing for non-existant references](https://github.com/libgit2/libgit2/pull/4665) +- [tests: submodule: do not rely on config iteration order](https://github.com/libgit2/libgit2/pull/4673) +- [Detect duplicated submodules for the same path](https://github.com/libgit2/libgit2/pull/4641) +- [Fix docurium missing 
includes](https://github.com/libgit2/libgit2/pull/4530) +- [github: update issue template](https://github.com/libgit2/libgit2/pull/4627) +- [streams: openssl: add missing check on OPENSSL_LEGACY_API](https://github.com/libgit2/libgit2/pull/4661) +- [mbedtls: don't require mbedtls from our pkgconfig file](https://github.com/libgit2/libgit2/pull/4656) +- [Fixes for CVE 2018-11235](https://github.com/libgit2/libgit2/pull/4660) +- [Backport fixes for CVE 2018-11235](https://github.com/libgit2/libgit2/pull/4659) +- [Added note about Windows junction points to the differences from git document](https://github.com/libgit2/libgit2/pull/4653) +- [cmake: resolve libraries found by pkg-config ](https://github.com/libgit2/libgit2/pull/4642) +- [refdb_fs: enhance performance of globbing](https://github.com/libgit2/libgit2/pull/4629) +- [global: adjust init count under lock](https://github.com/libgit2/libgit2/pull/4645) +- [Fix GCC 8.1 warnings](https://github.com/libgit2/libgit2/pull/4646) +- [Worktrees can be made from bare repositories](https://github.com/libgit2/libgit2/pull/4630) +- [docs: add documentation to state differences from the git cli](https://github.com/libgit2/libgit2/pull/4605) +- [Sanitize the hunk header to ensure it contains UTF-8 valid data](https://github.com/libgit2/libgit2/pull/4542) +- [examples: ls-files: add ls-files to list paths in the index](https://github.com/libgit2/libgit2/pull/4380) +- [OpenSSL legacy API cleanups](https://github.com/libgit2/libgit2/pull/4608) +- [worktree: add functions to get name and path](https://github.com/libgit2/libgit2/pull/4640) +- [Fix deletion of unrelated branch on worktree](https://github.com/libgit2/libgit2/pull/4633) +- [mbedTLS support](https://github.com/libgit2/libgit2/pull/4173) +- [Configuration entry iteration in order](https://github.com/libgit2/libgit2/pull/4525) +- [blame_git: fix coalescing step never being executed](https://github.com/libgit2/libgit2/pull/4580) +- [Fix leaks in 
master](https://github.com/libgit2/libgit2/pull/4636) +- [Leak fixes for v0.27.1](https://github.com/libgit2/libgit2/pull/4635) +- [worktree: Read worktree specific reflog for HEAD](https://github.com/libgit2/libgit2/pull/4577) +- [fixed stack smashing due to wrong size of struct stat on the stack](https://github.com/libgit2/libgit2/pull/4631) +- [scripts: add backporting script](https://github.com/libgit2/libgit2/pull/4476) +- [worktree: add ability to create worktree with pre-existing branch](https://github.com/libgit2/libgit2/pull/4524) +- [refs: preserve the owning refdb when duping reference](https://github.com/libgit2/libgit2/pull/4618) +- [Submodules-API should report .gitmodules parse errors instead of ignoring them](https://github.com/libgit2/libgit2/pull/4522) +- [Typedef git_pkt_type and clarify recv_pkt return type](https://github.com/libgit2/libgit2/pull/4514) +- [online::clone: validate user:pass in HTTP_PROXY](https://github.com/libgit2/libgit2/pull/4556) +- [ transports: ssh: disconnect session before freeing it ](https://github.com/libgit2/libgit2/pull/4596) +- [revwalk: fix uninteresting revs sometimes not limiting graphwalk](https://github.com/libgit2/libgit2/pull/4622) +- [attr_file: fix handling of directory patterns with trailing spaces](https://github.com/libgit2/libgit2/pull/4614) +- [transports: local: fix assert when fetching into repo with symrefs](https://github.com/libgit2/libgit2/pull/4613) +- [remote/proxy: fix git_transport_certificate_check_db description](https://github.com/libgit2/libgit2/pull/4597) +- [Flag options in describe.h as being optional](https://github.com/libgit2/libgit2/pull/4587) +- [diff: Add missing GIT_DELTA_TYPECHANGE -> 'T' mapping.](https://github.com/libgit2/libgit2/pull/4611) +- [appveyor: fix typo in registry key to disable DHE](https://github.com/libgit2/libgit2/pull/4609) +- [Fix build with LibreSSL 2.7](https://github.com/libgit2/libgit2/pull/4607) +- [appveyor: workaround for intermittent test 
failures](https://github.com/libgit2/libgit2/pull/4603) +- [sha1dc: update to fix errors with endianess](https://github.com/libgit2/libgit2/pull/4601) +- [submodule: check index for path and prefix before adding submodule](https://github.com/libgit2/libgit2/pull/4378) +- [odb: mempack: fix leaking objects when freeing mempacks](https://github.com/libgit2/libgit2/pull/4602) +- [types: remove unused git_merge_result](https://github.com/libgit2/libgit2/pull/4598) +- [checkout: change default strategy to SAFE](https://github.com/libgit2/libgit2/pull/4531) +- [Add myself to git.git-authors](https://github.com/libgit2/libgit2/pull/4570) + + +## v0.25.0-alpha.16 [(2019-07-23)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.16) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.15...v0.25.0-alpha.16) + +#### Summary of changes +- Adds support for Node 12 +- Updates lodash dependency to address security notice +- Expose Tree.prototype.createUpdated(repo, numUpdates, updates) +- Bumps libgit2 + - Fixes gitignore issue with pattern negation + - Remote.list now gets the correct list of remotes if remotes are changed by external process + +#### Merged PRs into NodeGit +- [Bump libgit2 #1705](https://github.com/nodegit/nodegit/pull/1705) +- [Fix Tree#createUpdated #1704](https://github.com/nodegit/nodegit/pull/1704) +- [Fix failing tests on CI #1703](https://github.com/nodegit/nodegit/pull/1703) +- [Audit lodash and fix package-lock.json #1702](https://github.com/nodegit/nodegit/pull/1702) +- [Implement support for Node 12 #1696](https://github.com/nodegit/nodegit/pull/1696) + +#### Merged PRs into LibGit2 +- [config_file: refresh when creating an iterator #5181](https://github.com/libgit2/libgit2/pull/5181) +- [azure: drop powershell #5141](https://github.com/libgit2/libgit2/pull/5141) +- [fuzzer: use futils instead of fileops #5180](https://github.com/libgit2/libgit2/pull/5180) +- [w32: fix unlinking of directory symlinks 
#5151](https://github.com/libgit2/libgit2/pull/5151) +- [patch_parse: fix segfault due to line containing static contents #5179](https://github.com/libgit2/libgit2/pull/5179) +- [ignore: fix determining whether a shorter pattern negates another #5173](https://github.com/libgit2/libgit2/pull/5173) +- [patch_parse: handle missing newline indicator in old file #5159](https://github.com/libgit2/libgit2/pull/5159) +- [patch_parse: do not depend on parsed buffer's lifetime #5158](https://github.com/libgit2/libgit2/pull/5158) +- [sha1: fix compilation of WinHTTP backend #5174](https://github.com/libgit2/libgit2/pull/5174) +- [repository: do not initialize HEAD if it's provided by templates #5176](https://github.com/libgit2/libgit2/pull/5176) +- [configuration: cvar -> configmap #5138](https://github.com/libgit2/libgit2/pull/5138) +- [Evict cache items more efficiently #5172](https://github.com/libgit2/libgit2/pull/5172) +- [clar: fix suite count #5175](https://github.com/libgit2/libgit2/pull/5175) +- [Ignore VS2017 specific files and folders #5163](https://github.com/libgit2/libgit2/pull/5163) +- [gitattributes: ignore macros defined in subdirectories #5156](https://github.com/libgit2/libgit2/pull/5156) +- [clar: correctly account for "data" suites when counting #5168](https://github.com/libgit2/libgit2/pull/5168) +- [Allocate memory more efficiently when packing objects #5170](https://github.com/libgit2/libgit2/pull/5170) +- [fileops: fix creation of directory in filesystem root #5131](https://github.com/libgit2/libgit2/pull/5131) +- [win32: fix fuzzers and have CI build them #5160](https://github.com/libgit2/libgit2/pull/5160) +- [Config parser separation #5134](https://github.com/libgit2/libgit2/pull/5134) +- [config_file: implement stat cache to avoid repeated rehashing #5132](https://github.com/libgit2/libgit2/pull/5132) +- [ci: build with ENABLE_WERROR on Windows #5143](https://github.com/libgit2/libgit2/pull/5143) +- [Fix Regression: attr: Correctly load system 
attr file (on Windows) #5152](https://github.com/libgit2/libgit2/pull/5152) +- [hash: fix missing error return on production builds #5145](https://github.com/libgit2/libgit2/pull/5145) +- [Resolve static check warnings in example code #5142](https://github.com/libgit2/libgit2/pull/5142) +- [Multiple hash algorithms #4438](https://github.com/libgit2/libgit2/pull/4438) +- [More documentation #5128](https://github.com/libgit2/libgit2/pull/5128) +- [Incomplete commondir support #4967](https://github.com/libgit2/libgit2/pull/4967) +- [Remove warnings #5078](https://github.com/libgit2/libgit2/pull/5078) +- [Re-run flaky tests #5140](https://github.com/libgit2/libgit2/pull/5140) + + +## v0.25.0-alpha.15 [(2019-07-15)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.15) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.14...v0.25.0-alpha.15) + +#### Summary of changes +- Removed NSEC optimization due to performance regressions in repositories that did not use NSEC optimization cloned via NodeGit. 
+ +#### Merged PRs into NodeGit +- [Remove NSEC #1699](https://github.com/nodegit/nodegit/pull/1699) + + +## v0.25.0-alpha.14 [(2019-07-01)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.14) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.13...v0.25.0-alpha.14) + +#### Summary of changes +- Always use builtin regex for linux for portability + +#### Merged PRs into NodeGit +- [Use builtin regex library for linux for better portability #1693](https://github.com/nodegit/nodegit/pull/1693) +- [Remove pcre-config from binding.gyp #1694](https://github.com/nodegit/nodegit/pull/1694) + +## v0.25.0-alpha.13 [(2019-06-26)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.13) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.12...v0.25.0-alpha.13) + +#### Summary of changes +- Turn on GIT_USE_NSEC on all platforms +- Use Iconv on OSX for better internationalization support. +- Bump libgit2 to bring in: + - NTLM proxy support + - Negotiate/Kerberos proxy support + - Various git config fixes + - Various git ignore fixes + - Various libgit2 performance improvements + - Windows/Linux now use PCRE for regex, OSX uses regcomp_l, this should address collation issues in diffing +- Fixed bug with Repository.prototype.refreshReferences dying on corrupted reference. 
We now ignore corrupted references + +#### Merged PRs into NodeGit +- [refresh_references.cc: skip refs that can't be directly resolved #1689](https://github.com/nodegit/nodegit/pull/1689) +- [Bump libgit2 to fork of latest master #1690](https://github.com/nodegit/nodegit/pull/1690) + +#### Merged PRs into LibGit2 +- [errors: use lowercase](https://github.com/libgit2/libgit2/pull/5137) +- [largefile tests: only write 2GB on 32-bit platforms](https://github.com/libgit2/libgit2/pull/5136) +- [Fix broken link in README](https://github.com/libgit2/libgit2/pull/5129) +- [net: remove unused `git_headlist_cb`](https://github.com/libgit2/libgit2/pull/5122) +- [cmake: default NTLM client to off if no HTTPS support](https://github.com/libgit2/libgit2/pull/5124) +- [attr: rename constants and macros for consistency](https://github.com/libgit2/libgit2/pull/5119) +- [Change API instances of `fromnoun` to `from_noun` (with an underscore)](https://github.com/libgit2/libgit2/pull/5117) +- [object: rename git_object__size to git_object_size](https://github.com/libgit2/libgit2/pull/5118) +- [Replace fnmatch with wildmatch](https://github.com/libgit2/libgit2/pull/5110) +- [Documentation fixes](https://github.com/libgit2/libgit2/pull/5111) +- [Removal of `p_fallocate`](https://github.com/libgit2/libgit2/pull/5114) +- [Modularize our TLS & hash detection](https://github.com/libgit2/libgit2/pull/5055) +- [tests: merge::analysis: use test variants to avoid duplicated test suites](https://github.com/libgit2/libgit2/pull/5109) +- [Rename options initialization functions](https://github.com/libgit2/libgit2/pull/5101) +- [deps: ntlmclient: disable implicit fallthrough warnings](https://github.com/libgit2/libgit2/pull/5112) +- [gitignore with escapes](https://github.com/libgit2/libgit2/pull/5097) +- [Handle URLs with a colon after host but no port](https://github.com/libgit2/libgit2/pull/5108) +- [Merge analysis support for bare repos](https://github.com/libgit2/libgit2/pull/5022) +- [Add 
memleak check docs](https://github.com/libgit2/libgit2/pull/5104) +- [Data-driven tests](https://github.com/libgit2/libgit2/pull/5098) +- [sha1dc: update to fix endianess issues on AIX/HP-UX](https://github.com/libgit2/libgit2/pull/5107) +- [Add NTLM support for HTTP(s) servers and proxies](https://github.com/libgit2/libgit2/pull/5052) +- [Callback type names should be suffixed with `_cb`](https://github.com/libgit2/libgit2/pull/5102) +- [tests: checkout: fix symlink.git being created outside of sandbox](https://github.com/libgit2/libgit2/pull/5099) +- [ignore: handle escaped trailing whitespace](https://github.com/libgit2/libgit2/pull/5095) +- [Ignore: only treat one leading slash as a root identifier](https://github.com/libgit2/libgit2/pull/5074) +- [online tests: use gitlab for auth failures](https://github.com/libgit2/libgit2/pull/5094) +- [Ignore files: don't ignore whitespace](https://github.com/libgit2/libgit2/pull/5076) +- [cache: fix cache eviction using deallocated key](https://github.com/libgit2/libgit2/pull/5088) +- [SECURITY.md: split out security-relevant bits from readme](https://github.com/libgit2/libgit2/pull/5085) +- [Restore NetBSD support](https://github.com/libgit2/libgit2/pull/5086) +- [repository: fix garbage return value](https://github.com/libgit2/libgit2/pull/5084) +- [cmake: disable fallthrough warnings for PCRE](https://github.com/libgit2/libgit2/pull/5083) +- [Configuration parsing: validate section headers with quotes](https://github.com/libgit2/libgit2/pull/5073) +- [Loosen restriction on wildcard "*" refspecs](https://github.com/libgit2/libgit2/pull/5060) +- [Use PCRE for our fallback regex engine when regcomp_l is unavailable](https://github.com/libgit2/libgit2/pull/4935) +- [Remote URL last-chance resolution](https://github.com/libgit2/libgit2/pull/5062) +- [Skip UTF8 BOM in ignore files](https://github.com/libgit2/libgit2/pull/5075) +- [We've already added `ZLIB_LIBRARIES` to `LIBGIT2_LIBS` so don't also add the `z` 
library](https://github.com/libgit2/libgit2/pull/5080) +- [Define SYMBOLIC_LINK_FLAG_DIRECTORY if required](https://github.com/libgit2/libgit2/pull/5077) +- [Support symlinks for directories in win32](https://github.com/libgit2/libgit2/pull/5065) +- [rebase: orig_head and onto accessors](https://github.com/libgit2/libgit2/pull/5057) +- [cmake: correctly detect if system provides `regcomp`](https://github.com/libgit2/libgit2/pull/5063) +- [Correctly write to missing locked global config](https://github.com/libgit2/libgit2/pull/5023) +- [[RFC] util: introduce GIT_DOWNCAST macro](https://github.com/libgit2/libgit2/pull/4561) +- [examples: implement SSH authentication](https://github.com/libgit2/libgit2/pull/5051) +- [git_repository_init: stop traversing at windows root](https://github.com/libgit2/libgit2/pull/5050) +- [config_file: check result of git_array_alloc](https://github.com/libgit2/libgit2/pull/5053) +- [patch_parse.c: Handle CRLF in parse_header_start](https://github.com/libgit2/libgit2/pull/5027) +- [fix typo](https://github.com/libgit2/libgit2/pull/5045) +- [sha1: don't inline `git_hash_global_init` for win32](https://github.com/libgit2/libgit2/pull/5039) +- [ignore: treat paths with trailing "/" as directories](https://github.com/libgit2/libgit2/pull/5040) +- [Test that largefiles can be read through the tree API](https://github.com/libgit2/libgit2/pull/4874) +- [Tests for symlinked user config](https://github.com/libgit2/libgit2/pull/5034) +- [patch_parse: fix parsing addition/deletion of file with space](https://github.com/libgit2/libgit2/pull/5035) +- [Optimize string comparisons](https://github.com/libgit2/libgit2/pull/5018) +- [Negation of subdir ignore causes other subdirs to be unignored](https://github.com/libgit2/libgit2/pull/5020) +- [xdiff: fix typo](https://github.com/libgit2/libgit2/pull/5024) +- [docs: clarify relation of safe and forced checkout strategy](https://github.com/libgit2/libgit2/pull/5032) +- [Each hash implementation should 
define `git_hash_global_init`](https://github.com/libgit2/libgit2/pull/5026) +- [[Doc] Update URL to git2-rs](https://github.com/libgit2/libgit2/pull/5012) +- [remote: Rename git_remote_completion_type to _t](https://github.com/libgit2/libgit2/pull/5008) +- [odb: provide a free function for custom backends](https://github.com/libgit2/libgit2/pull/5005) +- [Have git_branch_lookup accept GIT_BRANCH_ALL](https://github.com/libgit2/libgit2/pull/5000) +- [Rename git_transfer_progress to git_indexer_progress](https://github.com/libgit2/libgit2/pull/4997) +- [High-level map APIs](https://github.com/libgit2/libgit2/pull/4901) +- [refdb_fs: fix loose/packed refs lookup racing with repacks](https://github.com/libgit2/libgit2/pull/4984) +- [Allocator restructuring](https://github.com/libgit2/libgit2/pull/4998) +- [cache: fix misnaming of `git_cache_free`](https://github.com/libgit2/libgit2/pull/4992) +- [examples: produce single cgit2 binary](https://github.com/libgit2/libgit2/pull/4956) +- [Remove public 'inttypes.h' header](https://github.com/libgit2/libgit2/pull/4991) +- [Prevent reading out of bounds memory](https://github.com/libgit2/libgit2/pull/4996) +- [Fix a memory leak in odb_otype_fast()](https://github.com/libgit2/libgit2/pull/4987) +- [Make stdalloc__reallocarray call stdalloc__realloc](https://github.com/libgit2/libgit2/pull/4986) +- [Remove `git_time_monotonic`](https://github.com/libgit2/libgit2/pull/4990) +- [Fix a _very_ improbable memory leak in git_odb_new()](https://github.com/libgit2/libgit2/pull/4988) +- [ci: publish documentation on merge](https://github.com/libgit2/libgit2/pull/4989) +- [Enable creation of worktree from bare repo's default branch](https://github.com/libgit2/libgit2/pull/4982) +- [Allow bypassing check for '.keep' file](https://github.com/libgit2/libgit2/pull/4965) +- [Release v0.28.1](https://github.com/libgit2/libgit2/pull/4983) + + + +## v0.25.0-alpha.12 
[(2019-06-03)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.12) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.11...v0.25.0-alpha.12) + +#### Summary of changes +- Fix bug in Repository.prototype.refreshReferences where new remote references from a new remote added/fetched on a separate repo instance do not show up in the result. +- Fixed a prototype problem with cherrypick, merge, and other collections that have a function at their root. call, apply, and bind should now be on NodeGit.Cherrypick. +- Bumped libssh2 to resolve security notice. + +#### Merged PRs into NodeGit +- [Bump libssh2 to 1.8.2 and fix some npm audit warnings #1678](https://github.com/nodegit/nodegit/pull/1678) +- [Root functions should keep their function prototypes correctly #1681](https://github.com/nodegit/nodegit/pull/1681) +- [refresh_references.cc: bust LibGit2 remote list cache by reading config #1685](https://github.com/nodegit/nodegit/pull/1685) + + +## v0.25.0-alpha.11 [(2019-05-20)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.11) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.10...v0.25.0-alpha.11) + +#### Summary of changes +- Improve speed and correctness of fileHistoryWalk. The API should not have changed; however, when the end of the walk has been reached, `reachedEndOfHistory` will be specified on the resulting array. 
+ +#### Merged PRs into NodeGit +- [Implement faster file history walk #1676](https://github.com/nodegit/nodegit/pull/1676) + + +## v0.25.0-alpha.10 [(2019-05-03)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.10) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.9...v0.25.0-alpha.10) + +#### Summary of changes +- Drops support for Ubuntu 14 after EOL +- Fixes openssl prebuilt downloads for electron builds +- Fixes commits retrieved from Commit.prototype.parent +- *DEPRECATION* Support signing commits in Repository.prototype.mergeBranches. The last parameter `processMergeMessageCallback` is now deprecated, but will continue to work. Use the options object instead, which will contain the `processMergeMessageCallback`, as well as the `signingCb`. +- Bump Node-Gyp to 4.0.0 to fix tar security vulnerability +- *BREAKING* `getRemotes` no longer returns remote names, it now returns remote objects directly. Use `getRemoteNames` to get a list of remote names. +- Optimized a set of routines in NodeGit. These methods as written in Javascript require hundreds or thousands of requests to async workers to retrieve data. We've batched these requests and performed them on a single async worker. There are now native implementations of the following: + - Repository.prototype.getReferences: Retrieves all references on async worker. + - Repository.prototype.getRemotes: Retrieves all remotes on async worker. + - Repository.prototype.getSubmodules: Retrieves all submodules on async worker. + - Repository.prototype.refreshReferences: Open sourced function from GitKraken. Grabs a lot of information about references on an async worker. + - Revwalk.prototype.commitWalk: Retrieves up to N commits from a revwalk on an async worker. 
+ +#### Merged PRs into NodeGit +- [EOL for Node 6 and Ubuntu 14.04 #1649](https://github.com/nodegit/nodegit/pull/1649) +- [Ensures that commits from parent(*) has a repository #1658](https://github.com/nodegit/nodegit/pull/1658) +- [Update openssl conan distributions #1663](https://github.com/nodegit/nodegit/pull/1663) +- [Support signing in Repository#mergeBranches #1664](https://github.com/nodegit/nodegit/pull/1664) +- [Dependency upgrade node-gyp upgraded to 4.0.0 #1672](https://github.com/nodegit/nodegit/pull/1672) +- [Add additional getters to streamline information gathering (breaking change) #1671](https://github.com/nodegit/nodegit/pull/1671) + + + +## v0.25.0-alpha.9 [(2019-03-04)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.9) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.8...v0.25.0-alpha.9) + +#### Summary of changes +- Removed access to the diff_so_far param in git_diff_notify_cb and git_diff_progress_cb +- Changed FilterSource.prototype.repo to async to prevent segfaults on filters that run during Submodule.status +- Clean up deprecation messages to inform users of what was deprecated, not just what users should switch to +- When installing on a machine that has yarn and does not have npm, the preinstall script should succeed now +- ceiling_dirs is now an optional parameter to Repository.discover + +#### Merged PRs into NodeGit +- [Clean up some dangerous memory accesses in callbacks #1642](https://github.com/nodegit/nodegit/pull/1642) +- [Output the item that was deprecated when giving deprecation notice #1643](https://github.com/nodegit/nodegit/pull/1643) +- [Don't fail yarn installs when we can't find npm #1644](https://github.com/nodegit/nodegit/pull/1644) +- [`ceiling_dirs` parameter in `Repository.discover` is optional #1245](https://github.com/nodegit/nodegit/pull/1245) + + +## v0.25.0-alpha.8 [(2019-02-27)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.8) + +[Full 
Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.7...v0.25.0-alpha.8) + +#### Summary of changes +- Fixed segfault in NodeGit.Merge.analysis and NodeGit.Merge.analysisForRef + +#### Merged PRs into NodeGit +- [Add missing `shouldAlloc` declarations for git_merge_analysis* functions #1641](https://github.com/nodegit/nodegit/pull/1641) + + +## v0.25.0-alpha.7 [(2019-02-20)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.7) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.6...v0.25.0-alpha.7) + +#### Summary of changes +- Fixed bug where repeated uses of extractSignature would fail because of the use of regex.prototype.match +- Added support for building on IBM i (PASE) machines +- Fixed bug where signingCb in rebases would not return error codes to LibGit2 if the signingCb threw or rejected +- Exposed AnnotatedCommit methods: + - AnnotatedCommit.prototype.ref +- Exposed Apply methods: + - Apply.apply applies a diff to the repository + - Apply.toTree applies a diff to a tree +- Exposed Config methods: + - Config.prototype.deleteEntry + - Config.prototype.deleteMultivar + - Config.prototype.getBool + - Config.prototype.getInt32 + - Config.prototype.getInt64 + - Config.prototype.setMultivar + - Config.prototype.snapshot +- Exposed ConfigIterator with methods: + - ConfigIterator.create + - ConfigIterator.createGlob + - ConfigIterator.createMultivar + - ConfigIterator.prototype.next +- Exposed Merge methods: + - Merge.analysis + - Merge.analysisForRef +- Expose Remote methods: + - Remote.createWithOpts + +#### Merged PRs into NodeGit +- [Fix regex state causing subsequent runs of Tag.extractSignature to fail #1630](https://github.com/nodegit/nodegit/pull/1630) +- [Update LibGit2 docs to v0.28.0 #1631](https://github.com/nodegit/nodegit/pull/1631) +- [Add support for building on IBM i (PASE) #1634](https://github.com/nodegit/nodegit/pull/1634) +- [Expose more config methods 
#1635](https://github.com/nodegit/nodegit/pull/1635) +- [Catch errors and pass them to libgit2 as error codes in rebase signingcb #1636](https://github.com/nodegit/nodegit/pull/1636) +- [Simplify check for IBM i operating system #1637](https://github.com/nodegit/nodegit/pull/1637) +- [Bump LibGit2 to fork of v0.28.1 #1638](https://github.com/nodegit/nodegit/pull/1638) + + +## v0.25.0-alpha.6 [(2019-02-14)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.6) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.5...v0.25.0-alpha.6) + +#### Summary of changes +- Bumped LibGit2 to v0.28.0. +- Fixed problem with continue rebase preventing users from skipping commits +- Fixed leak where struct/option types were leaking libgit2 pointers + +#### Merged PRs into NodeGit +- [We should clear the persistent cell in structs when they are destroyed #1629](https://github.com/nodegit/nodegit/pull/1629) +- [Fix "errorno" typo #1628](https://github.com/nodegit/nodegit/pull/1628) +- [Bump Libgit2 fork to v0.28.0 #1627](https://github.com/nodegit/nodegit/pull/1627) + + +## v0.25.0-alpha.5 [(2019-02-11)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.5) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.4...v0.25.0-alpha.5) + +#### Summary of changes +- Fixed builds for Electron 4 for real this time + +#### Merged PRs into NodeGit +- [Fix macOS and Windows Electron 4 builds #1626](https://github.com/nodegit/nodegit/pull/1626) + + +## v0.25.0-alpha.4 [(2019-02-08)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.4) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.3...v0.25.0-alpha.4) + +#### Summary of changes +- Fixed bug where signing the init commit failed due to being unable to update the `HEAD` ref. +- Changed `NodeGit.Signature.default` to async, because it actually ends up reading the config. 
+- Fixed bug where templates were not reporting errors for synchronous methods. It's a bit of a wide net, but in general, + it is now possible that certain sync methods in NodeGit will begin failing that did not fail before. This is the correct + behavior. +- Switched `NodeGit.Oid.fromString`'s internal implementation from `git_oid_fromstr` to `git_oid_fromstrp` +- Fixed builds for Electron 4 +- Added `NodeGit.Reference.updateTerminal` + +#### Merged PRs into NodeGit +- [Fix non-existent / dangling refs cause Repository.prototype.createCommitWithSignature to fail #1624](https://github.com/nodegit/nodegit/pull/1624) +- [Handle new gyp information for electron builds #1623](https://github.com/nodegit/nodegit/pull/1623) + + +## v0.25.0-alpha.3 [(2019-02-05)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.3) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.2...v0.25.0-alpha.3) + +#### Summary of changes +- Enforced consistent use of signing callbacks within the application. Any object that implements the signingCallback + pattern for signing commits or tags should use the exact same callback type and with the same meaning. + `type SigningCallback = (content: string) => {| code: number, field?: string, signedData?: string |};` + If the code is `NodeGit.Error.CODE.OK` or 0, the operation will succeed and _at least_ signedData is expected to be filled out. + If the code is a negative number, except for `NodeGit.Error.CODE.PASSTHROUGH`, the signing operation will fail. + If the code is `NodeGit.Error.CODE.PASSTHROUGH`, the operation will continue without signing the object. 
+ +#### Merged PRs into NodeGit +- [Use same API for signingCb in all places that can be crypto signed #1621](https://github.com/nodegit/nodegit/pull/1621) + + +## v0.25.0-alpha.2 [(2019-02-01)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.2) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.25.0-alpha.1...v0.25.0-alpha.2) + +#### Summary of changes +- Added RebaseOptions to repository.prototype.rebaseContinue + +#### Merged PRs into NodeGit +- [Breaking: Repository.prototype.continueRebase enhancements #1619](https://github.com/nodegit/nodegit/pull/1619) + + +## v0.25.0-alpha.1 [(2019-01-30)](https://github.com/nodegit/nodegit/releases/tag/v0.25.0-alpha.1) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.24.0...v0.25.0-alpha.1) + +#### Summary of changes +- Bump Libgit2 to preview of v0.28.0 +- Add signing support for commits and annotated tags +- Updated Signature.prototype.toString to optionally include timestamps +- [BREAKING] Converted Buf.prototype.set and Buf.prototype.grow from async to sync +- Added complete support for libgit2 types: + - git_index_name_entry + - git_index_reuc_entry + - git_mailmap +- Exposed git_path_is_gitfile +- Exposed git_tag_create_frombuffer + +#### Merged PRs into NodeGit +- [adds support for gpg commit signing (fixes #1018) #1448](https://github.com/nodegit/nodegit/pull/1448) +- [Add `updateRef` parameter to Repository#createCommitWithSignature #1610](https://github.com/nodegit/nodegit/pull/1610) +- [Documentation fixes. 
#1611](https://github.com/nodegit/nodegit/pull/1611) +- [Add Commit#amendWithSignature #1616](https://github.com/nodegit/nodegit/pull/1616) +- [Bump libgit2 to a preview of v0.28 #1615](https://github.com/nodegit/nodegit/pull/1615) +- [Fix issues with Commit#amendWithSignature #1617](https://github.com/nodegit/nodegit/pull/1617) +- [Marked Repository.createBlobFromBuffer as async #1614](https://github.com/nodegit/nodegit/pull/1614) +- [Add functionality for creating Tags with signatures and extracting signatures from Tags #1618](https://github.com/nodegit/nodegit/pull/1618) + + +## v0.24.0 [(2019-01-16)](https://github.com/nodegit/nodegit/releases/tag/v0.24.0) + +[Full Changelog](https://github.com/nodegit/nodegit/compare/v0.23.0...v0.24.0) + +#### Summary of changes +- Garbage collect most of the library. +- All free functions have been removed. The expectation is that they will be collected by the GC. +- All init options methods have been removed. They were never supposed to be exposed in the first place. +- Added support for performing history walks on directories. +- Fix various bugs that led to segfaults or incorrect behavior. +- Removed ssl and crypto dependency from non-electron builds. + +##### Removed methods +Mostly due to missing support anyway, please report anything you were using as an issue. 
+ - NodeGit.Blob.createFromStreamCommit + - NodeGit.Branch.Iterator.prototype.new + - NodeGit.Config.initBackend + - NodeGit.Config.prototype.snapshot + - NodeGit.Config.prototype.setBool + - NodeGit.Config.prototype.setInt32 + - NodeGit.Config.prototype.setInt64 + - NodeGit.Index.prototype.owner + - NodeGit.Note.iteratorNew + - NodeGit.Note.next + - NodeGit.Odb.prototype.addDiskAlternate + - NodeGit.Repository.prototype.configSnapshot + - NodeGit.Signature.prototype.dup + - NodeGit.Tag.foreach + - NodeGit.Transport.init + - NodeGit.Transport.sshWithPaths + - NodeGit.Transport.unregister + +##### Newly exposed methods: + - NodeGit.Config.prototype.getEntry + - NodeGit.Config.prototype.snapshot + - NodeGit.Config.prototype.refresh + - NodeGit.Config.prototype.setBool + - NodeGit.Config.prototype.setInt32 + - NodeGit.Config.prototype.setInt64 + - NodeGit.Diff.prototype.isSortedIcase + - NodeGit.DiffStats.prototype.deletions + - NodeGit.DiffStats.prototype.filesChanged + - NodeGit.DiffStats.prototype.insertions + - NodeGit.DiffStats.prototype.toBuf + - NodeGit.Odb.hashfile + - NodeGit.Odb.prototype.readPrefix + - NodeGit.OidShorten.prototype.add + - NodeGit.OidShorten.create + - NodeGit.PathspecMatchList.prototype.diffEntry + - NodeGit.PathspecMatchList.prototype.entry + - NodeGit.PathspecMatchList.prototype.entrycount + - NodeGit.PathspecMatchList.prototype.failedEntry + - NodeGit.PathspecMatchList.prototype.failedEntryCount + +##### Newly exposed types + - NodeGit.DescribeFormatOptions + - NodeGit.DiffStats + - NodeGit.OidShorten + - NodeGit.PathspecMatchList + +#### Merged PRs into NodeGit +- [Garbage collection: Free mostly everything automatically #1570](https://github.com/nodegit/nodegit/pull/1570) +- [Fix typo in GitHub issue template #1586](https://github.com/nodegit/nodegit/pull/1586) +- [More suitable example about Signature #1582](https://github.com/nodegit/nodegit/pull/1582) +- [Add support for directories when using `fileHistoryWalk` 
#1583](https://github.com/nodegit/nodegit/pull/1583) +- [Add a test for Repository's getReferenceCommit #1601](https://github.com/nodegit/nodegit/pull/1601) +- [Check parameters before performing reset #1603](https://github.com/nodegit/nodegit/pull/1603) +- [Remove ssl and crypto dependency on non-electron builds #1600](https://github.com/nodegit/nodegit/pull/1600) +- [Clean up libssh2 configure step #1574](https://github.com/nodegit/nodegit/pull/1574) +- [Fix checkout bug in our fork of libgit2 #1609](https://github.com/nodegit/nodegit/pull/1609) +- [Fix segfault in NodeGit.Revert.revert #1605](https://github.com/nodegit/nodegit/pull/1605) + + ## v0.24.0-alpha.1 [(2018-10-25)](https://github.com/nodegit/nodegit/releases/tag/v0.24.0-alpha.1) [Full Changelog](https://github.com/nodegit/nodegit/compare/v0.23.0...v0.24.0-alpha.1) @@ -2156,8 +4192,4 @@ We have added Node 6 as a supported platform! Going forward we aim to have 1:1 s [Full Changelog](https://github.com/nodegit/nodegit/compare/v0.0.1...v0.0.2) -## v0.0.1 [(2011-03-10)](https://github.com/nodegit/nodegit/tree/v0.0.1) - - - -\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)* +## v0.0.1 [(2011-03-10)](https://github.com/nodegit/nodegit/tree/v0.0.1) \ No newline at end of file diff --git a/README.md b/README.md index ddd922d4c3..72442d791d 100644 --- a/README.md +++ b/README.md @@ -2,24 +2,22 @@ > Node bindings to the [libgit2](http://libgit2.github.com/) project. 
+[![Actions Status](https://github.com/nodegit/nodegit/workflows/Testing/badge.svg)](https://github.com/nodegit/nodegit/actions) -Linux & macOS | Windows | Coverage | Dependencies -------------- | ------- | -------- | ------------- -[![Build Status Travis](https://api.travis-ci.org/nodegit/nodegit.svg?branch=master)](https://travis-ci.org/nodegit/nodegit) | [![Build Status AppVeyor](https://ci.appveyor.com/api/projects/status/e5a5q75l9yfhnfv2?svg=true)](https://ci.appveyor.com/project/timbranyen/nodegit) | [![Coveralls](https://coveralls.io/repos/nodegit/nodegit/badge.svg)](https://coveralls.io/r/nodegit/nodegit) | [![Dependencies](https://david-dm.org/nodegit/nodegit.svg)](https://david-dm.org/nodegit/nodegit) - -**Stable (libgit2@v0.27.3): 0.27.3** +**Stable (libgit2@v0.28.3): 0.28.3** ## Have a problem? Come chat with us! ## Visit [slack.libgit2.org](http://slack.libgit2.org/) to sign up, then join us in #nodegit. ## Maintained by ## -Tim Branyen [@tbranyen](http://twitter.com/tbranyen), -John Haley [@johnhaley81](http://twitter.com/johnhaley81), and -Max Korp [@maxkorp](http://twitter.com/MaximilianoKorp) with help from tons of +Tyler Ang-Wanek [@twwanek](http://twitter.com/twwanek) with help from tons of [awesome contributors](https://github.com/nodegit/nodegit/contributors)! ### Alumni Maintainers ### +Tim Branyen [@tbranyen](http://twitter.com/tbranyen), +John Haley [@johnhaley81](http://twitter.com/johnhaley81), +Max Korp [@maxkorp](http://twitter.com/MaximilianoKorp), Steve Smith [@orderedlist](https://twitter.com/orderedlist), Michael Robinson [@codeofinterest](http://twitter.com/codeofinterest), and Nick Kallen [@nk](http://twitter.com/nk) @@ -76,10 +74,17 @@ In Ubuntu: sudo apt-get install libssl-dev ``` -Additionally, you need `curl-config` on your system. 
You need one of these packages: - * libcurl4-gnutls-dev - * libcurl4-nss-dev - * libcurl4-openssl-dev +You will need the following libraries installed on your linux machine: + - libpcre + - libpcreposix + - libkrb5 + - libk5crypto + - libcom_err + +When building locally, you will also need development packages for kerberos and pcre, so both of these utilities must be present on your machine: + - pcre-config + - krb5-config + If you are still encountering problems while installing, you should try the [Building from source](http://www.nodegit.org/guides/install/from-source/) diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 34f73e23dd..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,62 +0,0 @@ -# appveyor file -# https://www.appveyor.com/docs/appveyor-yml/ - -image: Visual Studio 2015 - -platform: - - x64 - - x86 - -# build version format -version: "{build}" - -# Set a known clone folder -clone_folder: c:\projects\nodegit - -# fix line endings in Windows -init: - - git config --global core.autocrlf input - - git config --global user.name "John Doe" - - git config --global user.email johndoe@example.com - -# what combinations to test -environment: - JOBS: 4 - GIT_SSH: c:\projects\nodegit\vendor\plink.exe - GYP_MSVS_VERSION: 2015 - matrix: - # Node.js - - nodejs_version: "10" - - nodejs_version: "8" - - nodejs_version: "6" - -matrix: - fast_finish: true - -# Get the latest stable version of Node 0.STABLE.latest -install: - - git submodule update --init --recursive - - ps: Install-Product node $env:nodejs_version $env:platform - - ps: Start-Process c:\projects\nodegit\vendor\pageant.exe c:\projects\nodegit\vendor\private.ppk - - npm install -g npm - - npm install -g node-gyp - - appveyor-retry call npm install - -test_script: - - node --version - - npm --version - - appveyor-retry call npm test - -on_success: - - IF %APPVEYOR_REPO_TAG%==true npm install -g node-pre-gyp - - IF %APPVEYOR_REPO_TAG%==true npm install -g aws-sdk - - IF 
%APPVEYOR_REPO_TAG%==true node lifecycleScripts\clean - - IF %APPVEYOR_REPO_TAG%==true node-pre-gyp package - - IF %APPVEYOR_REPO_TAG%==true node-pre-gyp publish - -build: off - -branches: - only: - - master - - v0.3 diff --git a/examples/add-and-commit.js b/examples/add-and-commit.js index 4f4959dd78..42215257b8 100644 --- a/examples/add-and-commit.js +++ b/examples/add-and-commit.js @@ -1,9 +1,9 @@ -var nodegit = require("../"); -var path = require("path"); -var fse = require("fs-extra"); -var fileName = "newfile.txt"; -var fileContent = "hello world"; -var directoryName = "salad/toast/strangerinastrangeland/theresnowaythisexists"; +const nodegit = require("../"); +const path = require("path"); +const fs = require("fs"); +const fileName = "newfile.txt"; +const fileContent = "hello world"; +const directoryName = "salad/toast/strangerinastrangeland/theresnowaythisexists"; /** * This example creates a certain file `newfile.txt`, adds it to the git @@ -11,59 +11,38 @@ var directoryName = "salad/toast/strangerinastrangeland/theresnowaythisexists"; * followed by a `git commit` **/ -var repo; -var index; -var oid; -nodegit.Repository.open(path.resolve(__dirname, "../.git")) -.then(function(repoResult) { - repo = repoResult; - return fse.ensureDir(path.join(repo.workdir(), directoryName)); -}).then(function(){ - return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); -}) -.then(function() { - return fse.writeFile( +(async () => { + const repo = await nodegit.Repository.open(path.resolve(__dirname, "../.git")); + + await fs.promises.mkdir(path.join(repo.workdir(), directoryName), { + recursive: true, + }); + + await fs.promises.writeFile(path.join(repo.workdir(), fileName), fileContent); + await fs.promises.writeFile( path.join(repo.workdir(), directoryName, fileName), fileContent ); -}) -.then(function() { - return repo.refreshIndex(); -}) -.then(function(indexResult) { - index = indexResult; -}) -.then(function() { + + const index = await 
repo.refreshIndex(); + // this file is in the root of the directory and doesn't need a full path - return index.addByPath(fileName); -}) -.then(function() { + await index.addByPath(fileName); // this file is in a subdirectory and can use a relative path - return index.addByPath(path.posix.join(directoryName, fileName)); -}) -.then(function() { + await index.addByPath(path.posix.join(directoryName, fileName)); // this will write both files to the index - return index.write(); -}) -.then(function() { - return index.writeTree(); -}) -.then(function(oidResult) { - oid = oidResult; - return nodegit.Reference.nameToId(repo, "HEAD"); -}) -.then(function(head) { - return repo.getCommit(head); -}) -.then(function(parent) { - var author = nodegit.Signature.now("Scott Chacon", + await index.write(); + + const oid = await index.writeTree(); + + const parent = await repo.getHeadCommit(); + const author = nodegit.Signature.now("Scott Chacon", "schacon@gmail.com"); - var committer = nodegit.Signature.now("Scott A Chacon", + const committer = nodegit.Signature.now("Scott A Chacon", "scott@github.com"); - return repo.createCommit("HEAD", author, committer, "message", oid, [parent]); -}) -.done(function(commitId) { + const commitId = await repo.createCommit("HEAD", author, committer, "message", oid, [parent]); + console.log("New Commit: ", commitId); -}); +})(); diff --git a/examples/clone.js b/examples/clone.js index 2b9937949e..459713d942 100644 --- a/examples/clone.js +++ b/examples/clone.js @@ -14,7 +14,7 @@ fse.remove(path).then(function() { certificateCheck: function() { // github will fail cert check on some OSX machines // this overrides that check - return 1; + return 0; } } } diff --git a/examples/cloneFromGithubWith2Factor.js b/examples/cloneFromGithubWith2Factor.js index 35b08432ec..62e0df8073 100644 --- a/examples/cloneFromGithubWith2Factor.js +++ b/examples/cloneFromGithubWith2Factor.js @@ -20,7 +20,7 @@ var opts = { return nodegit.Cred.userpassPlaintextNew(token, 
"x-oauth-basic"); }, certificateCheck: function() { - return 1; + return 0; } } } diff --git a/examples/create-new-repo.js b/examples/create-new-repo.js index 1c7e3e9f57..1df93fbbc8 100644 --- a/examples/create-new-repo.js +++ b/examples/create-new-repo.js @@ -1,46 +1,32 @@ -var nodegit = require("../"); -var path = require("path"); -var fse = require("fs-extra"); -var fileName = "newfile.txt"; -var fileContent = "hello world"; -var repoDir = "../../newRepo"; +const nodegit = require("../"); +const path = require("path"); +const fs = require("fs"); +const fileName = "newfile.txt"; +const fileContent = "hello world"; +const repoDir = "../newRepo"; -var repository; -var index; -fse.ensureDir(path.resolve(__dirname, repoDir)) -.then(function() { - return nodegit.Repository.init(path.resolve(__dirname, repoDir), 0); -}) -.then(function(repo) { - repository = repo; - return fse.writeFile(path.join(repository.workdir(), fileName), fileContent); -}) -.then(function(){ - return repository.refreshIndex(); -}) -.then(function(idx) { - index = idx; -}) -.then(function() { - return index.addByPath(fileName); -}) -.then(function() { - return index.write(); -}) -.then(function() { - return index.writeTree(); -}) -.then(function(oid) { - var author = nodegit.Signature.now("Scott Chacon", +(async () => { + await fs.promises.mkdir(path.resolve(__dirname, repoDir), { + recursive: true, + }); + const repo = await nodegit.Repository.init(path.resolve(__dirname, repoDir), 0); + await fs.promises.writeFile(path.join(repo.workdir(), fileName), fileContent); + const index = await repo.refreshIndex(); + await index.addByPath(fileName); + await index.write(); + + const oid = await index.writeTree(); + + const author = nodegit.Signature.now("Scott Chacon", "schacon@gmail.com"); - var committer = nodegit.Signature.now("Scott A Chacon", + const committer = nodegit.Signature.now("Scott A Chacon", "scott@github.com"); - // Since we're creating an inital commit, it has no parents. 
Note that unlike + // Since we're creating an initial commit, it has no parents. Note that unlike // normal we don't get the head either, because there isn't one yet. - return repository.createCommit("HEAD", author, committer, "message", oid, []); -}) -.done(function(commitId) { + const commitId = await repo.createCommit("HEAD", author, committer, "message", oid, []); console.log("New Commit: ", commitId); -}); +})(); + + diff --git a/examples/details-for-tree-entry.js b/examples/details-for-tree-entry.js index a567fcf75c..13dcd4e210 100644 --- a/examples/details-for-tree-entry.js +++ b/examples/details-for-tree-entry.js @@ -1,29 +1,26 @@ -var nodegit = require("../"); -var path = require("path"); +const nodegit = require("../"); +const path = require("path"); /** * This shows how to get details from a tree entry or a blob **/ -nodegit.Repository.open(path.resolve(__dirname, "../.git")) - .then(function(repo) { - return repo.getTree("e1b0c7ea57bfc5e30ec279402a98168a27838ac9") - .then(function(tree) { - var treeEntry = tree.entryByIndex(0); +(async () => { + const repo = await nodegit.Repository.open(path.resolve(__dirname, "../.git")); + const tree = await repo.getTree("e1b0c7ea57bfc5e30ec279402a98168a27838ac9"); + const treeEntry = tree.entryByIndex(0); + + // Tree entry doesn't have any data associated with the actual entry + // To get that we need to get the index entry that this points to + const index = await repo.refreshIndex(); + const indexEntry = index.getByPath(treeEntry.path()); - // Tree entry doesn't have any data associated with the actual entry - // To get that we need to get the index entry that this points to - return repo.refreshIndex().then(function(index) { - var indexEntry = index.getByPath(treeEntry.path()); + // With the index entry we can now view the details for the tree entry + console.log("Entry path: " + indexEntry.path); + console.log("Entry time in seconds: " + indexEntry.mtime.seconds()); + console.log("Entry oid: " + 
indexEntry.id.toString()); + console.log("Entry size: " + indexEntry.fileSize); + + console.log("Done!"); +})(); - // With the index entry we can now view the details for the tree entry - console.log("Entry path: " + indexEntry.path); - console.log("Entry time in seconds: " + indexEntry.mtime.seconds()); - console.log("Entry oid: " + indexEntry.id.toString()); - console.log("Entry size: " + indexEntry.fileSize); - }); - }); - }) - .done(function() { - console.log("Done!"); - }); diff --git a/examples/diff-commits.js b/examples/diff-commits.js index f68d0fcfb1..b3d6d75107 100644 --- a/examples/diff-commits.js +++ b/examples/diff-commits.js @@ -1,41 +1,41 @@ -var nodegit = require("../"); -var path = require("path"); +const nodegit = require("../"); +const path = require("path"); // This code examines the diffs between a particular commit and all of its // parents. Since this commit is not a merge, it only has one parent. This is // similar to doing `git show`. -nodegit.Repository.open(path.resolve(__dirname, "../.git")) -.then(function(repo) { - return repo.getCommit("59b20b8d5c6ff8d09518454d4dd8b7b30f095ab5"); -}) -.then(function(commit) { +(async () => { + const repo = await nodegit.Repository.open(path.resolve(__dirname, "../.git")) + const commit = await repo.getCommit("59b20b8d5c6ff8d09518454d4dd8b7b30f095ab5"); console.log("commit " + commit.sha()); - console.log("Author:", commit.author().name() + - " <" + commit.author().email() + ">"); + console.log( + "Author:", commit.author().name() + + " <" + commit.author().email() + ">" + ); console.log("Date:", commit.date()); console.log("\n " + commit.message()); - return commit.getDiff(); -}) -.done(function(diffList) { - diffList.forEach(function(diff) { - diff.patches().then(function(patches) { - patches.forEach(function(patch) { - patch.hunks().then(function(hunks) { - hunks.forEach(function(hunk) { - hunk.lines().then(function(lines) { - console.log("diff", patch.oldFile().path(), - patch.newFile().path()); - 
console.log(hunk.header().trim()); - lines.forEach(function(line) { - console.log(String.fromCharCode(line.origin()) + - line.content().trim()); - }); - }); - }); - }); - }); - }); - }); -}); + const diffList = await commit.getDiff(); + for (const diff of diffList) { + const patches = await diff.patches(); + for (const patch of patches) { + const hunks = await patch.hunks(); + for (const hunk of hunks) { + const lines = await hunk.lines(); + console.log( + "diff", + patch.oldFile().path(), + patch.newFile().path() + ); + console.log(hunk.header().trim()); + for (const line of lines) { + console.log( + String.fromCharCode(line.origin()) + + line.content().trim() + ); + } + } + } + } +})(); diff --git a/examples/general.js b/examples/general.js index b6ae7efc95..b06c4d7954 100644 --- a/examples/general.js +++ b/examples/general.js @@ -1,12 +1,10 @@ -var nodegit = require("../"); -var path = require("path"); -var oid; -var odb; -var repo; +const nodegit = require("../"); +const path = require("path"); + // **nodegit** is a javascript library for node.js that wraps libgit2, a // pure C implementation of the Git core. It provides an asynchronous -// interface around any functions that do I/O, and a sychronous interface +// interface around any functions that do I/O, and a synchronous interface // around the rest. // // This file is an example of using that API in a real, JS file. @@ -19,348 +17,301 @@ var repo; // Nearly, all git operations in the context of a repository. // To open a repository, -nodegit.Repository.open(path.resolve(__dirname, "../.git")) - .then(function(repoResult) { - repo = repoResult; - console.log("Opened repository."); - - // ### SHA-1 Value Conversions - - // Objects in git (commits, blobs, etc.) are referred to by their SHA value - // **nodegit** uses a simple wrapper around hash values called an `Oid`. - // The oid validates that the SHA is well-formed. 
- - oid = nodegit.Oid.fromString("c27d9c35e3715539d941254f2ce57042b978c49c"); - - // Most functions in in **nodegit** that take an oid will also take a - // string, so for example, you can look up a commit by a string SHA or - // an Oid, but but any functions that create new SHAs will always return - // an Oid. - - // If you have a oid, you can easily get the hex value of the SHA again. - console.log("Sha hex string:", oid.toString()); - - // ### Working with the Object Database - - // **libgit2** provides [direct access][odb] to the object database. The - // object database is where the actual objects are stored in Git. For - // working with raw objects, we'll need to get this structure from the - // repository. - return repo.odb(); - }) - - .then(function(odbResult) { - odb = odbResult; - - // We can read raw objects directly from the object database if we have - // the oid (SHA) of the object. This allows us to access objects without - // knowing thier type and inspect the raw bytes unparsed. - - return odb.read(oid); - }) - - .then(function(object) { - // A raw object only has three properties - the type (commit, blob, tree - // or tag), the size of the raw data and the raw, unparsed data itself. - // For a commit or tag, that raw data is human readable plain ASCII - // text. For a blob it is just file contents, so it could be text or - // binary data. For a tree it is a special binary format, so it's unlikely - // to be hugely helpful as a raw object. - var data = object.data(); - var type = object.type(); - var size = object.size(); - - console.log("Object size and type:", size, type); - console.log("Raw data: ", data.toString().substring(100), "..."); - - }) - - .then(function() { - // You can also write raw object data to Git. This is pretty cool because - // it gives you direct access to the key/value properties of Git. Here - // we'll write a new blob object that just contains a simple string. - // Notice that we have to specify the object type. 
- return odb.write("test data", "test data".length, nodegit.Object.TYPE.BLOB); - }) - - .then(function(oid) { - // Now that we've written the object, we can check out what SHA1 was - // generated when the object was written to our database. - console.log("Written Object: ", oid.toString()); - }) - - .then(function() { - // ### Object Parsing - - // libgit2 has methods to parse every object type in Git so you don't have - // to work directly with the raw data. This is much faster and simpler - // than trying to deal with the raw data yourself. - - // #### Commit Parsing - - // [Parsing commit objects][pco] is simple and gives you access to all the - // data in the commit - the author (name, email, datetime), committer - // (same), tree, message, encoding and parent(s). - - oid = nodegit.Oid.fromString("698c74e817243efe441a5d1f3cbaf3998282ca86"); - - // Many methods in **nodegit** are asynchronous, because they do file - // or network I/O. By convention, all asynchronous methods are named - // imperatively, like `getCommit`, `open`, `read`, `write`, etc., whereas - // synchronous methods are named nominatively, like `type`, `size`, `name`. - - return repo.getCommit(oid); - }) - - .then(function(commit) { - // Each of the properties of the commit object are accessible via methods, - // including commonly needed variations, such as `git_commit_time` which - // returns the author time and `git_commit_message` which gives you the - // commit message. - console.log("Commit:", commit.message(), - commit.author().name(), commit.date()); - - // Commits can have zero or more parents. The first (root) commit will - // have no parents, most commits will have one (i.e. the commit it was - // based on) and merge commits will have two or more. Commits can - // technically have any number, though it's rare to have more than two. 
- return commit.getParents(); - }) - - .then(function(parents) { - parents.forEach(function(parent) { - console.log("Parent:", parent.toString()); - }); - }) - - .then(function() { - // #### Writing Commits - - // nodegit provides a couple of methods to create commit objects easily as - // well. - var author = nodegit.Signature.now("Scott Chacon", - "schacon@gmail.com"); - var committer = nodegit.Signature.now("Scott A Chacon", - "scott@github.com"); - - // Commit objects need a tree to point to and optionally one or more - // parents. Here we're creating oid objects to create the commit with, - // but you can also use existing ones: - var treeId = nodegit.Oid.fromString( - "4170d10f19600b9cb086504e8e05fe7d863358a2"); - var parentId = nodegit.Oid.fromString( - "eebd0ead15d62eaf0ba276da53af43bbc3ce43ab"); - - return repo.getTree(treeId).then(function(tree) { - return repo.getCommit(parentId).then(function(parent) { - // Here we actually create the commit object with a single call with all - // the values we need to create the commit. The SHA key is written to - // the `commit_id` variable here. - return repo.createCommit( - null /* do not update the HEAD */, - author, - committer, - "example commit", - tree, - [parent]); - }).then(function(oid) { - console.log("New Commit:", oid.toString()); - }); - }); - }) - - .then(function() { - // #### Tag Parsing - - // You can parse and create tags with the [tag management API][tm], which - // functions very similarly to the commit lookup, parsing and creation - // methods, since the objects themselves are very similar. 
- - oid = nodegit.Oid.fromString("dcc4aa9fcdaced037434cb149ed3b6eab4d0709d"); - return repo.getTag(oid); - }) - - .then(function(tag) { - // Now that we have the tag object, we can extract the information it - // generally contains: the target (usually a commit object), the type of - // the target object (usually "commit"), the name ("v1.0"), the tagger (a - // git_signature - name, email, timestamp), and the tag message. - console.log(tag.name(), tag.targetType(), tag.message()); - - return tag.target(); - }) - - .then(function (target) { - console.log("Target is commit:", target.isCommit()); - }) - - .then(function() { - // #### Tree Parsing - - // A Tree is how Git represents the state of the filesystem - // at a given revision. In general, a tree corresponds to a directory, - // and files in that directory are either files (blobs) or directories. - - // [Tree parsing][tp] is a bit different than the other objects, in that - // we have a subtype which is the tree entry. This is not an actual - // object type in Git, but a useful structure for parsing and traversing - // tree entries. - - oid = nodegit.Oid.fromString("e1b0c7ea57bfc5e30ec279402a98168a27838ac9"); - return repo.getTree(oid); - }) - - .then(function(tree) { - console.log("Tree Size:", tree.entryCount()); - - function dfs(tree) { - var promises = []; - - tree.entries().forEach(function(entry) { - if (entry.isDirectory()) { - promises.push(entry.getTree().then(dfs)); - } else if (entry.isFile()) { - console.log("Tree Entry:", entry.name()); - } - }); - - return Promise.all(promises); +(async () => { + const repo = await nodegit.Repository.open(path.resolve(__dirname, "../.git")); + console.log("Opened repository."); + + // ### SHA-1 Value Conversions + + // Objects in git (commits, blobs, etc.) are referred to by their SHA value + // **nodegit** uses a simple wrapper around hash values called an `Oid`. + // The oid validates that the SHA is well-formed. 
+ + let oid = nodegit.Oid.fromString("c27d9c35e3715539d941254f2ce57042b978c49c"); + + // Most functions in **nodegit** that take an oid will also take a + // string, so for example, you can look up a commit by a string SHA or + // an Oid, but any functions that create new SHAs will always return + // an Oid. + + // If you have an oid, you can easily get the hex value of the SHA again. + console.log("Sha hex string:", oid.toString()); + + // ### Working with the Object Database + + // **libgit2** provides [direct access][odb] to the object database. The + // object database is where the actual objects are stored in Git. For + // working with raw objects, we'll need to get this structure from the + // repository. + const odb = await repo.odb(); + + // We can read raw objects directly from the object database if we have + // the oid (SHA) of the object. This allows us to access objects without + // knowing their type and inspect the raw bytes unparsed. + + const object = await odb.read(oid); + + // A raw object only has three properties - the type (commit, blob, tree + // or tag), the size of the raw data and the raw, unparsed data itself. + // For a commit or tag, that raw data is human readable plain ASCII + // text. For a blob it is just file contents, so it could be text or + // binary data. For a tree it is a special binary format, so it's unlikely + // to be hugely helpful as a raw object. + const data = object.data(); + const type = object.type(); + const size = object.size(); + + console.log("Object size and type:", size, type); + console.log("Raw data: ", data.toString().substring(100), "..."); + + // You can also write raw object data to Git. This is pretty cool because + // it gives you direct access to the key/value properties of Git. Here + // we'll write a new blob object that just contains a simple string. + // Notice that we have to specify the object type. 
+ oid = await odb.write("test data", "test data".length, nodegit.Object.TYPE.BLOB); + + // Now that we've written the object, we can check out what SHA1 was + // generated when the object was written to our database. + console.log("Written Object: ", oid.toString()); + + // ### Object Parsing + + // libgit2 has methods to parse every object type in Git so you don't have + // to work directly with the raw data. This is much faster and simpler + // than trying to deal with the raw data yourself. + + // #### Commit Parsing + + // [Parsing commit objects][pco] is simple and gives you access to all the + // data in the commit - the author (name, email, datetime), committer + // (same), tree, message, encoding and parent(s). + + oid = nodegit.Oid.fromString("698c74e817243efe441a5d1f3cbaf3998282ca86"); + + // Many methods in **nodegit** are asynchronous, because they do file + // or network I/O. By convention, all asynchronous methods are named + // imperatively, like `getCommit`, `open`, `read`, `write`, etc., whereas + // synchronous methods are named nominatively, like `type`, `size`, `name`. + + const commit = await repo.getCommit(oid); + + // Each of the properties of the commit object are accessible via methods, + // including commonly needed variations, such as `git_commit_time` which + // returns the author time and `git_commit_message` which gives you the + // commit message. + console.log( + "Commit:", commit.message(), + commit.author().name(), commit.date() + ); + + // Commits can have zero or more parents. The first (root) commit will + // have no parents, most commits will have one (i.e. the commit it was + // based on) and merge commits will have two or more. Commits can + // technically have any number, though it's rare to have more than two. 
+ const parents = await commit.getParents(); + for (const parent of parents) { + console.log("Parent:", parent.toString()); + } + + // #### Writing Commits + + // nodegit provides a couple of methods to create commit objects easily as + // well. + const author = nodegit.Signature.now("Scott Chacon", + "schacon@gmail.com"); + const committer = nodegit.Signature.now("Scott A Chacon", + "scott@github.com"); + + // Commit objects need a tree to point to and optionally one or more + // parents. Here we're creating oid objects to create the commit with, + // but you can also use existing ones: + const treeId = nodegit.Oid.fromString( + "4170d10f19600b9cb086504e8e05fe7d863358a2"); + const parentId = nodegit.Oid.fromString( + "eebd0ead15d62eaf0ba276da53af43bbc3ce43ab"); + + let tree = await repo.getTree(treeId); + const parent = await repo.getCommit(parentId); + // Here we actually create the commit object with a single call with all + // the values we need to create the commit. The SHA key is written to + // the `commit_id` variable here. + oid = await repo.createCommit( + null /* do not update the HEAD */, + author, + committer, + "example commit", + tree, + [parent] + ); + console.log("New Commit:", oid.toString()); + + // #### Tag Parsing + + // You can parse and create tags with the [tag management API][tm], which + // functions very similarly to the commit lookup, parsing and creation + // methods, since the objects themselves are very similar. + + oid = nodegit.Oid.fromString("dcc4aa9fcdaced037434cb149ed3b6eab4d0709d"); + const tag = await repo.getTag(oid); + + // Now that we have the tag object, we can extract the information it + // generally contains: the target (usually a commit object), the type of + // the target object (usually "commit"), the name ("v1.0"), the tagger (a + // git_signature - name, email, timestamp), and the tag message. 
+ console.log(tag.name(), tag.targetType(), tag.message()); + + const target = await tag.target(); + console.log("Target is commit:", target.isCommit()); + + // #### Tree Parsing + + // A Tree is how Git represents the state of the filesystem + // at a given revision. In general, a tree corresponds to a directory, + // and files in that directory are either files (blobs) or directories. + + // [Tree parsing][tp] is a bit different than the other objects, in that + // we have a subtype which is the tree entry. This is not an actual + // object type in Git, but a useful structure for parsing and traversing + // tree entries. + + oid = nodegit.Oid.fromString("e1b0c7ea57bfc5e30ec279402a98168a27838ac9"); + tree = await repo.getTree(oid); + + console.log("Tree Size:", tree.entryCount()); + + /** + * @param {nodegit.Tree} tree + */ + function dfs(tree) { + const promises = []; + + for (const entry of tree.entries()) { + if (entry.isDirectory()) { + promises.push(entry.getTree().then(dfs)); + } else if (entry.isFile()) { + console.log("Tree Entry:", entry.name()); + } } - return dfs(tree).then(function() { - // You can also access tree entries by path if you know the path of the - // entry you're looking for. - return tree.getEntry("example/general.js").then(function(entry) { - // Entries which are files have blobs associated with them: - entry.getBlob(function(error, blob) { - console.log("Blob size:", blob.size()); - }); - }); - }); - }) - - .then(function() { - // #### Blob Parsing - - // The last object type is the simplest and requires the least parsing - // help. Blobs are just file contents and can contain anything, there is - // no structure to it. The main advantage to using the [simple blob - // api][ba] is that when you're creating blobs you don't have to calculate - // the size of the content. There is also a helper for reading a file - // from disk and writing it to the db and getting the oid back so you - // don't have to do all those steps yourself. 
- - oid = nodegit.Oid.fromString("991c06b7b1ec6f939488427e4b41a4fa3e1edd5f"); - return repo.getBlob(oid); - }) - - .then(function(blob) { - // You can access a node.js Buffer with the raw contents - // of the blob directly. Note that this buffer may not - // contain ASCII data for certain blobs (e.g. binary files). - var buffer = blob.content(); - - // If you know that the blob is UTF-8, however, - console.log("Blob contents:", blob.toString().slice(0, 38)); - console.log("Buffer:", buffer.toString().substring(100), "..."); - }) - - .then(function() { - // ### Revwalking - - // The libgit2 [revision walking api][rw] provides methods to traverse the - // directed graph created by the parent pointers of the commit objects. - // Since all commits point back to the commit that came directly before - // them, you can walk this parentage as a graph and find all the commits - // that were ancestors of (reachable from) a given starting point. This - // can allow you to create `git log` type functionality. - - oid = nodegit.Oid.fromString("698c74e817243efe441a5d1f3cbaf3998282ca86"); - - // To use the revwalker, create a new walker, tell it how you want to sort - // the output and then push one or more starting points onto the walker. - // If you want to emulate the output of `git log` you would push the SHA - // of the commit that HEAD points to into the walker and then start - // traversing them. You can also "hide" commits that you want to stop at - // or not see any of their ancestors. So if you want to emulate `git log - // branch1..branch2`, you would push the oid of `branch2` and hide the oid - // of `branch1`. - var revWalk = repo.createRevWalk(); - - revWalk.sorting(nodegit.Revwalk.SORT.TOPOLOGICAL, - nodegit.Revwalk.SORT.REVERSE); - - revWalk.push(oid); - - // Now that we have the starting point pushed onto the walker, we start - // asking for ancestors. It will return them in the sorting order we asked - // for as commit oids. 
We can then lookup and parse the commited pointed - // at by the returned OID; note that this operation is specially fast - // since the raw contents of the commit object will be cached in memory - - function walk() { - return revWalk.next().then(function(oid) { - if (!oid) { - return; - } - - return repo.getCommit(oid).then(function(commit) { - console.log("Commit:", commit.toString()); - return walk(); - }); - }); + return Promise.all(promises); + } + + await dfs(tree); + + // You can also access tree entries by path if you know the path of the + // entry you're looking for. + const entry = await tree.getEntry("example/general.js"); + // Entries which are files have blobs associated with them: + let blob = await entry.getBlob(); + console.log("Blob size:", blob.rawsize()); + + // #### Blob Parsing + + // The last object type is the simplest and requires the least parsing + // help. Blobs are just file contents and can contain anything, there is + // no structure to it. The main advantage to using the [simple blob + // api][ba] is that when you're creating blobs you don't have to calculate + // the size of the content. There is also a helper for reading a file + // from disk and writing it to the db and getting the oid back so you + // don't have to do all those steps yourself. + + oid = nodegit.Oid.fromString("991c06b7b1ec6f939488427e4b41a4fa3e1edd5f"); + blob = await repo.getBlob(oid); + // You can access a node.js Buffer with the raw contents + // of the blob directly. Note that this buffer may not + // contain ASCII data for certain blobs (e.g. binary files). + const buffer = blob.content(); + + // If you know that the blob is UTF-8, however, + console.log("Blob contents:", blob.toString().slice(0, 38)); + console.log("Buffer:", buffer.toString().substring(100), "..."); + + // ### Revwalking + + // The libgit2 [revision walking api][rw] provides methods to traverse the + // directed graph created by the parent pointers of the commit objects. 
+ // Since all commits point back to the commit that came directly before + // them, you can walk this parentage as a graph and find all the commits + // that were ancestors of (reachable from) a given starting point. This + // can allow you to create `git log` type functionality. + + oid = nodegit.Oid.fromString("698c74e817243efe441a5d1f3cbaf3998282ca86"); + + // To use the revwalker, create a new walker, tell it how you want to sort + // the output and then push one or more starting points onto the walker. + // If you want to emulate the output of `git log` you would push the SHA + // of the commit that HEAD points to into the walker and then start + // traversing them. You can also "hide" commits that you want to stop at + // or not see any of their ancestors. So if you want to emulate `git log + // branch1..branch2`, you would push the oid of `branch2` and hide the oid + // of `branch1`. + const revWalk = repo.createRevWalk(); + + revWalk.sorting( + nodegit.Revwalk.SORT.TOPOLOGICAL, + nodegit.Revwalk.SORT.REVERSE + ); + + revWalk.push(oid); + + // Now that we have the starting point pushed onto the walker, we start + // asking for ancestors. It will return them in the sorting order we asked + // for as commit oids. We can then lookup and parse the commits pointed + // at by the returned OID; note that this operation is specially fast + // since the raw contents of the commit object will be cached in memory. + + async function walk() { + let oid; + try { + oid = await revWalk.next(); + } catch(error) { + if (error.errno !== nodegit.Error.CODE.ITEROVER) { + throw error; + } else { + return; + } } + const commit = await repo.getCommit(oid); + console.log("Commit:", commit.toString()); return walk(); - }) - - .then(function() { - // ### Index File Manipulation - - // The [index file API][gi] allows you to read, traverse, update and write - // the Git index file (sometimes thought of as the staging area). 
- return repo.refreshIndex(); - }) - - .then(function(index) { - // For each entry in the index, you can get a bunch of information - // including the SHA (oid), path and mode which map to the tree objects - // that are written out. It also has filesystem properties to help - // determine what to inspect for changes (ctime, mtime, dev, ino, uid, - // gid, file_size and flags) All these properties are exported publicly in - // the `IndexEntry` class - - index.entries().forEach(function(entry) { - console.log("Index Entry:", entry.path(), entry.mtime().seconds()); - }); - }) - - .then(function() { - // ### References - - // The [reference API][ref] allows you to list, resolve, create and update - // references such as branches, tags and remote references (everything in - // the .git/refs directory). - - return repo.getReferenceNames(nodegit.Reference.TYPE.LISTALL); - }) - - .then(function(referenceNames) { - var promises = []; - - referenceNames.forEach(function(referenceName) { - promises.push(repo.getReference(referenceName).then(function(reference) { - if (reference.isConcrete()) { - console.log("Reference:", referenceName, reference.target()); - } else if (reference.isSymbolic()) { - console.log("Reference:", referenceName, reference.symbolicTarget()); - } - })); - }); + } - return Promise.all(promises); - }) + await walk(); + + // ### Index File Manipulation + + // The [index file API][gi] allows you to read, traverse, update and write + // the Git index file (sometimes thought of as the staging area). + const index = await repo.refreshIndex(); + + // For each entry in the index, you can get a bunch of information + // including the SHA (oid), path and mode which map to the tree objects + // that are written out. 
It also has filesystem properties to help + // determine what to inspect for changes (ctime, mtime, dev, ino, uid, + // gid, file_size and flags) All these properties are exported publicly in + // the `IndexEntry` class + + for (const entry of index.entries()) { + console.log("Index Entry:", entry.path, entry.mtime.seconds()); + } + + // ### References + + // The [reference API][ref] allows you to list, resolve, create and update + // references such as branches, tags and remote references (everything in + // the .git/refs directory). + + const referenceNames = await repo.getReferenceNames(nodegit.Reference.TYPE.ALL); + + for (const referenceName of referenceNames) { + const reference = await repo.getReference(referenceName); + if (reference.isConcrete()) { + console.log("Reference:", referenceName, reference.target()); + } else if (reference.isSymbolic()) { + console.log("Reference:", referenceName, reference.symbolicTarget()); + } + } - .done(function() { - console.log("Done!"); - }); + console.log("Done!"); +})(); diff --git a/examples/merge-with-conflicts.js b/examples/merge-with-conflicts.js index 77e78340e4..55fb87d3b8 100644 --- a/examples/merge-with-conflicts.js +++ b/examples/merge-with-conflicts.js @@ -175,8 +175,8 @@ fse.remove(path.resolve(__dirname, repoDir)) } }) -// we need to get a new index as the other one isnt backed to -// the repository in the usual fashion, and just behaves weirdly +// we need to get a new index as the other one is not backed to +// the repository in the usual fashion, and just behaves weirdly. 
.then(function() { return repository.refreshIndex() .then(function(index) { diff --git a/examples/pull.js b/examples/pull.js index 7f5fc9af01..fe2d83411a 100644 --- a/examples/pull.js +++ b/examples/pull.js @@ -16,7 +16,7 @@ nodegit.Repository.open(path.resolve(__dirname, repoDir)) return nodegit.Cred.sshKeyFromAgent(userName); }, certificateCheck: function() { - return 1; + return 0; } } }); diff --git a/examples/read-file.js b/examples/read-file.js index 991a5ae390..9da5ed1758 100644 --- a/examples/read-file.js +++ b/examples/read-file.js @@ -1,25 +1,19 @@ -var nodegit = require("../"), - path = require("path"); +const nodegit = require("../"); +const path = require("path"); // This example opens a certain file, `README.md`, at a particular commit, // and prints the first 10 lines as well as some metadata. -var _entry; -nodegit.Repository.open(path.resolve(__dirname, "../.git")) - .then(function(repo) { - return repo.getCommit("59b20b8d5c6ff8d09518454d4dd8b7b30f095ab5"); - }) - .then(function(commit) { - return commit.getEntry("README.md"); - }) - .then(function(entry) { - _entry = entry; - return _entry.getBlob(); - }) - .then(function(blob) { - console.log(_entry.name(), _entry.sha(), blob.rawsize() + "b"); - console.log("========================================================\n\n"); - var firstTenLines = blob.toString().split("\n").slice(0, 10).join("\n"); - console.log(firstTenLines); - console.log("..."); - }) - .done(); + +(async () => { + const repo = await nodegit.Repository.open(path.resolve(__dirname, "../.git")); + const commit = await repo.getCommit("59b20b8d5c6ff8d09518454d4dd8b7b30f095ab5"); + const entry = await commit.getEntry("README.md"); + const blob = await entry.getBlob(); + + console.log(entry.name(), entry.sha(), blob.rawsize() + "b"); + console.log("========================================================\n\n"); + const firstTenLines = blob.toString().split("\n").slice(0, 10).join("\n"); + console.log(firstTenLines); + console.log("..."); 
+})(); + diff --git a/generate/index.js b/generate/index.js index 6bdaa42ef1..753897d5b7 100644 --- a/generate/index.js +++ b/generate/index.js @@ -6,6 +6,23 @@ var submoduleStatus = require("../lifecycleScripts/submodules/getStatus"); module.exports = function generate() { console.log("[nodegit] Generating native code"); + function tryGenerate(numRetries = 3) { + // There appears to be a race condition in the generate code somewhere + // Until we fix that, we should try to generate a few times before + try { + generateJson(); + generateNativeCode(); + generateMissingTests(); + } catch (error) { + if (numRetries > 0) { + console.log("[nodegit] WARNING - Failed to generate native code, trying again"); + tryGenerate(numRetries - 1); + } else { + throw error; + } + } + } + return submoduleStatus() .then(function(statuses) { var dirtySubmodules = statuses @@ -22,14 +39,11 @@ module.exports = function generate() { }); } }) - .then(function() { - generateJson(); - generateNativeCode(); - generateMissingTests(); - }) + .then(tryGenerate) .catch(function(e) { console.error("[nodegit] ERROR - Could not generate native code"); console.error(e); + throw e; }); } diff --git a/generate/input/README.md b/generate/input/README.md index 9ab9b51d6a..f0d7573eed 100644 --- a/generate/input/README.md +++ b/generate/input/README.md @@ -8,7 +8,7 @@ Customize the generated code using this configuration file. Enter the function's signature, arguments and their metadata and which functions can be skipped in this file. If you are using a manual template, remove all of its references from this file. #### libgit2-docs.json - These are provided by the libgit2 team. It includes all the metadata about the API provided by the libgit2 library. To grab the latest version of this file, download https://libgit2.github.com/libgit2/HEAD.json. + These are provided by the libgit2 team. It includes all the metadata about the API provided by the libgit2 library. 
To grab the latest version of this file, download https://libgit2.org/libgit2/HEAD.json. #### libgit2-supplement.json Use this confiuration file to group and override parts of the generated code. NodeGit tries its best to generate the right classes and structs, if it is not quite right, then use this config file to group/remove the functions. diff --git a/generate/input/callbacks.json b/generate/input/callbacks.json index a94faa030f..aa08429954 100644 --- a/generate/input/callbacks.json +++ b/generate/input/callbacks.json @@ -1,13 +1,28 @@ { - "git_attr_foreach_cb": { + "git_apply_delta_cb": { "args": [ { - "name": "name", - "cType": "const char *" + "name": "delta", + "cType": "const git_diff_delta *" }, { - "name": "value", - "cType": "const char *" + "name": "payload", + "cType": "void *" + } + ], + "return": { + "type": "int", + "noResults": 1, + "success": 0, + "error": -1, + "cancel": -1 + } + }, + "git_apply_hunk_cb": { + "args": [ + { + "name": "hunk", + "cType": "const git_diff_hunk *" }, { "name": "payload", @@ -18,14 +33,19 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, - "git_blob_chunk_cb": { + "git_attr_foreach_cb": { "args": [ { - "name": "entry", - "cType": "const git_config_entry *" + "name": "name", + "cType": "const char *" + }, + { + "name": "value", + "cType": "const char *" }, { "name": "payload", @@ -36,7 +56,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_checkout_notify_cb": { @@ -70,7 +91,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": 0 } }, "git_checkout_progress_cb": { @@ -93,10 +115,7 @@ } ], "return": { - "type": "int", - "noResults": 1, - "success": 0, - "error": -1, + "type": "void", "throttle": 100 } }, @@ -111,11 +130,57 @@ "cType": "void *" } ], + "return": { + "type": "void", + "throttle": 100 + } + }, + "git_commit_create_cb": { + "args": [ + { + "name": "out", + "cType": "git_oid 
*", + "isReturn": true + }, + { + "name": "author", + "cType": "const git_signature *" + }, + { + "name": "committer", + "cType": "const git_signature *" + }, + { + "name": "message_encoding", + "cType": "const char *" + }, + { + "name": "message", + "cType": "const char *" + }, + { + "name": "tree", + "cType": "const git_tree *" + }, + { + "name": "parent_count", + "cType": "size_t" + }, + { + "name": "parents", + "cType": "const git_oid * []" + }, + { + "name": "payload", + "cType": "void *" + } + ], "return": { "type": "int", - "noResults": 1, + "noResults": -30, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_config_foreach_cb": { @@ -133,14 +198,15 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, - "git_cred_acquire_cb": { + "git_credential_acquire_cb": { "args": [ { - "name": "cred", - "cType": "git_cred **", + "name": "credential", + "cType": "git_credential **", "isReturn": true }, { @@ -164,7 +230,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_diff_binary_cb": { @@ -186,7 +253,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_diff_file_cb": { @@ -209,7 +277,7 @@ "noResults": 1, "success": 0, "error": -1, - "throttle": 100 + "cancel": -1 } }, "git_diff_hunk_cb": { @@ -231,7 +299,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_diff_line_cb": { @@ -257,14 +326,16 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_diff_notify_cb": { "args": [ { "name": "diff_so_far", - "cType": "const git_diff *" + "cType": "const git_diff *", + "ignore": true }, { "name": "delta_to_add", @@ -283,13 +354,16 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } - },"git_diff_progress_cb": { + }, + "git_diff_progress_cb": { "args": [ { "name": "diff_so_far", 
- "cType": "const git_diff *" + "cType": "const git_diff *", + "ignore": true }, { "name": "old_path", @@ -308,104 +382,108 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 + } + }, + "git_filter_apply_fn": { + "args": [ + { + "name": "self", + "cType": "git_filter *" + }, + { + "name": "payload", + "cType": "void **" + }, + { + "name": "to", + "cType": "git_buf *" + }, + { + "name": "from", + "cType": "const git_buf *" + }, + { + "name": "src", + "cType": "const git_filter_source *" + } + ], + "return": { + "type": "int", + "noResults": -30, + "success": 0, + "error": -1, + "cancel": -1 } }, - "git_filter_apply_fn": { - "args": [ - { - "name": "self", - "cType": "git_filter *" - }, - { - "name": "payload", - "cType": "void **" - }, - { - "name": "to", - "cType": "git_buf *" - }, - { - "name": "from", - "cType": "const git_buf *" - }, - { - "name": "src", - "cType": "const git_filter_source *" - } - ], - "return": { - "type": "int", - "noResults": -30, - "success": 0, - "error": -1 - } - }, - "git_filter_check_fn": { - "args": [ - { - "name": "self", - "cType": "git_filter *" - }, - { + "git_filter_check_fn": { + "args": [ + { + "name": "self", + "cType": "git_filter *" + }, + { "name": "payload", "cType": "void **" - }, - { + }, + { "name": "src", "cType": "const git_filter_source *" - }, - { - "name": "attr_values", - "cType": "const char **" - } - ], - "return": { - "type": "int", - "noResults": -30, - "success": 0, - "error": -1 - } - }, - "git_filter_cleanup_fn": { - "args": [ - { - "name": "self", - "cType": "git_filter *" - }, - { - "name": "payload", - "cType": "void *" - } - ], - "return": { - "type": "void" - } - }, - "git_filter_init_fn": { - "args": [ - { - "name": "self", - "cType": "git_filter *" - } - ], - "return": { - "type": "int", + }, + { + "name": "attr_values", + "cType": "const char **" + } + ], + "return": { + "type": "int", + "noResults": -30, + "success": 0, + "error": -1, + "cancel": -1 + } + }, 
+ "git_filter_cleanup_fn": { + "args": [ + { + "name": "self", + "cType": "git_filter *" + }, + { + "name": "payload", + "cType": "void *" + } + ], + "return": { + "type": "void" + } + }, + "git_filter_init_fn": { + "args": [ + { + "name": "self", + "cType": "git_filter *" + } + ], + "return": { + "type": "int", "noResults": 0, - "success": 0, - "error": -1 - } - }, - "git_filter_shutdown_fn": { - "args": [ - { - "name": "self", - "cType": "git_filter *" - } - ], - "return": { - "type": "void" - } + "success": 0, + "error": -1, + "cancel": -1 + } + }, + "git_filter_shutdown_fn": { + "args": [ + { + "name": "self", + "cType": "git_filter *" + } + ], + "return": { + "type": "void" + } }, "git_index_matched_path_cb": { "args": [ @@ -426,7 +504,28 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 + } + }, + "git_indexer_progress_cb": { + "args": [ + { + "name": "stats", + "cType": "const git_indexer_progress *" + }, + { + "name": "payload", + "cType": "void *" + } + ], + "return": { + "type": "int", + "noResults": 0, + "success": 0, + "error": -1, + "cancel": -1, + "throttle": 100 } }, "git_note_foreach_cb": { @@ -448,7 +547,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_odb_foreach_cb": { @@ -461,7 +561,14 @@ "name": "payload", "cType": "void *" } - ] + ], + "return": { + "type": "int", + "noResults": 0, + "success": 0, + "error": -1, + "cancel": -1 + } }, "git_packbuilder_foreach_cb": { "args": [ @@ -482,14 +589,38 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 + } + }, + "git_push_update_reference_cb": { + "args": [ + { + "name": "refname", + "cType": "const char *" + }, + { + "name": "status", + "cType": "const char *" + }, + { + "name": "data", + "cType": "void *" + } + ], + "return": { + "type": "int", + "noResults": 1, + "success": 0, + "error": -1, + "cancel": -1 } }, "git_remote_create_cb": { "args": [ { "name": "out", 
- "cType": "git_repository **", + "cType": "git_remote **", "isReturn": true }, { @@ -513,7 +644,29 @@ "type": "int", "noResults": 0, "success": 0, - "error": 1 + "error": -1, + "cancel": -1 + } + }, + "git_remote_ready_cb": { + "args": [ + { + "name": "remote", + "cType": "git_remote *" + }, + { + "name": "direction", + "cType": "int" + }, + { + "name": "payload", + "cType": "void *" + } + ], + "return": { + "type": "int", + "success": 0, + "error": -1 } }, "git_repository_create_cb": { @@ -540,7 +693,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": 1 + "error": 1, + "cancel": -1 } }, "git_reference_foreach_cb": { @@ -558,7 +712,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_reference_foreach_name_cb": { @@ -576,7 +731,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_repository_fetchhead_foreach_cb": { @@ -606,7 +762,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": 1 + "error": -1, + "cancel": -1 } }, "git_repository_mergehead_foreach_cb": { @@ -624,7 +781,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": 1 + "error": -1, + "cancel": -1 } }, "git_revwalk_hide_cb": { @@ -642,30 +800,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 - } - }, - "git_smart_subtransport_cb": { - "args": [ - { - "name": "out", - "cType": "git_smart_subtransport **", - "isReturn": true - }, - { - "name": "owner", - "cType": "git_transport*" - }, - { - "name": "param", - "cType": "void *" - } - ], - "return": { - "type": "int", - "noResults": 0, - "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_stash_apply_progress_cb": { @@ -684,6 +820,7 @@ "noResults":0, "success": 0, "error": -1, + "cancel": -1, "throttle": 100 } }, @@ -710,7 +847,8 @@ "type": "int", "noResults":0, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_status_cb": { @@ -732,7 +870,8 @@ "type": "int", "noResults": 0, "success": 
0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_submodule_cb": { @@ -754,7 +893,8 @@ "type": "int", "noResults": 0, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_tag_foreach_cb": { @@ -776,29 +916,11 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 - } - }, - "git_transfer_progress_cb": { - "args": [ - { - "name": "stats", - "cType": "const git_transfer_progress *" - }, - { - "name": "payload", - "cType": "void *" - } - ], - "return": { - "type": "int", - "noResults": 0, - "success": 0, "error": -1, - "throttle": 100 + "cancel": -1 } }, - "git_push_transfer_progress": { + "git_push_transfer_progress_cb": { "args": [ { "name": "current", @@ -822,6 +944,7 @@ "noResults": 0, "success": 0, "error": -1, + "cancel": -1, "throttle": 100 } }, @@ -845,7 +968,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_transport_certificate_check_cb": { @@ -871,7 +995,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_transport_message_cb": { @@ -893,7 +1018,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, "git_treebuilder_filter_cb": { @@ -911,7 +1037,8 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": 0 } }, "git_treewalk_cb": { @@ -933,29 +1060,35 @@ "type": "int", "noResults": 1, "success": 0, - "error": -1 + "error": -1, + "cancel": -1 } }, - "git_push_update_reference_cb": { + "git_url_resolve_cb": { "args": [ { - "name": "refname", - "cType": "const char *" + "name": "url_resolved", + "cType": "git_buf *" }, { - "name": "status", + "name": "url", "cType": "const char *" }, { - "name": "data", + "name": "direction", + "cType": "int" + }, + { + "name": "payload", "cType": "void *" } ], "return": { "type": "int", - "noResults": 1, + "noResults": -30, "success": 0, - "error": -1 + "error": -1, + "cancel": -30 } } } diff --git 
a/generate/input/descriptor.json b/generate/input/descriptor.json index 096d4df9b1..99837534db 100644 --- a/generate/input/descriptor.json +++ b/generate/input/descriptor.json @@ -4,6 +4,19 @@ "JsName": "STATES", "isMask": false }, + "blob_filter_flag": { + "values": { + "GIT_BLOB_FILTER_CHECK_FOR_BINARY": { + "JsName": "CHECK_FOR_BINARY" + }, + "GIT_BLOB_FILTER_NO_SYSTEM_ATTRIBUTES": { + "JsName": "NO_SYSTEM_ATTRIBUTES" + }, + "GIT_BLOB_FILTER_ATTTRIBUTES_FROM_HEAD": { + "JsName": "ATTTRIBUTES_FROM_HEAD" + } + } + }, "branch": { "JsName": "BRANCH", "isMask": false @@ -20,6 +33,10 @@ } } }, + "credential": { + "JsName": "TYPE", + "owner": "Credential" + }, "describe_strategy": { "ignore": true }, @@ -48,16 +65,20 @@ "repository_init_flag": { "removeString": "INIT_" }, - "otype": { + "object": { "JsName": "TYPE", "owner": "Object", "removeString": "OBJ_" }, + "oid": { + "JsName": "TYPE", + "owner": "Oid" + }, "proxy": { "JsName": "PROXY", "isMask": false }, - "ref": { + "reference": { "owner": "Reference", "JsName": "TYPE" }, @@ -65,16 +86,26 @@ "JsName": "TYPE", "isMask": false }, + "revspec": { + "JsName": "TYPE", + "isMask": false + }, "sort": { "owner": "Revwalk" }, "status": { "JsName": "STATUS", "isMask": false + }, + "stream": { + "ignore": true } }, "types": { + "allocator": { + "ignore": true + }, "annotated_commit": { "selfFreeing": true, "functions": { @@ -87,29 +118,63 @@ } }, "git_annotated_commit_from_ref": { - "return": { - "ownedBy": ["repo"] + "args": { + "out": { + "ownedBy": ["repo"] + } } }, "git_annotated_commit_from_fetchhead": { - "return": { - "ownedBy": ["repo"] + "args": { + "out": { + "ownedBy": ["repo"] + } } }, "git_annotated_commit_lookup": { - "return": { - "ownedBy": ["repo"] + "args": { + "out": { + "ownedBy": ["repo"] + } } }, "git_annotated_commit_from_revspec": { + "args": { + "out": { + "ownedBy": ["repo"] + } + } + } + } + }, + "apply_options": { + "hasConstructor": true + }, + "apply": { + "functions": { + "git_apply": { + 
"args": { + "options": { + "isOptional": true + } + }, + "isAsync": true, "return": { - "ownedBy": ["repo"] + "isErrorCode": true } + }, + "git_apply_options_init": { + "ignore": true } } }, "attr": { "functions": { + "git_attr_cache_flush": { + "return": { + "isErrorCode": true + } + }, "git_attr_foreach": { "ignore": true }, @@ -174,6 +239,9 @@ }, "git_blame_init_options": { "ignore": true + }, + "git_blame_options_init": { + "ignore": true } } }, @@ -191,7 +259,7 @@ "singletonCppClassName": "GitRepository" }, "functions": { - "git_blob_create_frombuffer": { + "git_blob_create_from_buffer": { "isAsync": true, "args": { "id": { @@ -206,7 +274,7 @@ "isErrorCode": true } }, - "git_blob_create_fromworkdir": { + "git_blob_create_from_workdir": { "isAsync": true, "args": { "id": { @@ -217,7 +285,10 @@ "isErrorCode": true } }, - "git_blob_create_fromdisk": { + "git_blob_create_fromworkdir": { + "ignore": true + }, + "git_blob_create_from_disk": { "isAsync": true, "args": { "id": { @@ -228,10 +299,41 @@ "isErrorCode": true } }, - "git_blob_create_fromstream": { + "git_blob_create_from_stream": { + "ignore": true + }, + "git_blob_create_from_stream_commit": { "ignore": true }, - "git_blob_create_fromstream_commit": { + "git_blob_filter": { + "isAsync": true, + "isPrototypeMethod": true, + "args": { + "out": { + "isReturn": true, + "cppClassName": "GitBuf", + "jsClassName": "Buffer", + "shouldAlloc": true + }, + "blob": { + "cppClassName": "GitBlob", + "jsClassName": "Blob", + "isSelf": true + }, + "as_path": { + "cppClassName": "String", + "jsClassName": "String", + "cType": "const char *" + }, + "opts": { + "isOptional": true + } + }, + "return": { + "isErrorCode": true + } + }, + "git_blob_filter_options_init": { "ignore": true }, "git_blob_filtered_content": { @@ -285,6 +387,9 @@ "node_buffer.h" ] }, + "blob_filter_options": { + "hasConstructor": true + }, "branch": { "functions": { "git_branch_create": { @@ -337,16 +442,6 @@ "jsClassName": "Buffer", "cType": "git_buf 
*", "shouldAlloc": true - }, - "repo": { - "cppClassName": "GitRepository", - "jsClassName": "Repo", - "cType": "git_repository *" - }, - "canonical_branch_name": { - "cppClassName": "String", - "jsClassName": "String", - "cType": "const char *" } }, "return": { @@ -356,7 +451,7 @@ "git_branch_set_upstream": { "isAsync": true, "args": { - "upstream_name": { + "branch_name": { "isOptional": true } }, @@ -378,6 +473,7 @@ ] }, "buf": { + "freeFunctionName": "git_buf_dispose", "functions": { "git_buf_free": { "ignore": true @@ -397,7 +493,7 @@ "jsClassName": "Number", "isErrorCode": true }, - "isAsync": true + "isAsync": false }, "git_buf_set": { "cppFunctionName": "Set", @@ -418,13 +514,16 @@ "jsClassName": "Number", "isErrorCode": true }, - "isAsync": true + "isAsync": false } }, "dependencies": [ "../include/git_buf_converter.h" ] }, + "cert": { + "needsForwardDeclaration": false + }, "cert_hostkey": { "fields": { "hash_md5": { @@ -434,6 +533,10 @@ "hash_sha1": { "cppClassName": "String", "size": 20 + }, + "hash_sha256": { + "cppClassName": "String", + "size": 32 } } }, @@ -464,6 +567,9 @@ }, "git_checkout_index": { "args": { + "index": { + "isOptional": true + }, "opts": { "isOptional": true } @@ -476,6 +582,9 @@ "git_checkout_init_options": { "ignore": true }, + "git_checkout_options_init": { + "ignore": true + }, "git_checkout_tree": { "args": { "treeish": { @@ -495,13 +604,28 @@ "cherrypick": { "functions": { "git_cherrypick": { + "args": { + "cherrypick_options": { + "isOptional": true + } + }, "isAsync": true, "return": { "isErrorCode": true } }, + "git_cherrypick_commit": { + "args": { + "merge_options": { + "isOptional": true + } + } + }, "git_cherrypick_init_options": { "ignore": true + }, + "git_cherrypick_options_init": { + "ignore": true } } }, @@ -516,6 +640,9 @@ }, "git_clone_init_options": { "ignore": true + }, + "git_clone_options_init": { + "ignore": true } } }, @@ -672,6 +799,16 @@ } } }, + "commitarray": { + "fields": { + "commits": { + "ignore": 
true + }, + "count": { + "ignore": true + } + } + }, "config": { "selfFreeing": true, "functions": { @@ -685,10 +822,14 @@ "ignore": true }, "git_config_delete_entry": { - "ignore": true + "return": { + "isErrorCode": true + } }, "git_config_delete_multivar": { - "ignore": true + "return": { + "isErrorCode": true + } }, "git_config_entry_free": { "ignore": true @@ -752,7 +893,14 @@ "ignore": true }, "git_config_get_bool": { - "ignore": true + "args": { + "out": { + "shouldAlloc": true + } + }, + "return": { + "isErrorCode": true + } }, "git_config_get_entry": { "args": { @@ -763,10 +911,26 @@ } }, "git_config_get_int32": { - "ignore": true + "args": { + "out": { + "cType": "int32_t *", + "shouldAlloc": true + } + }, + "return": { + "isErrorCode": true + } }, "git_config_get_int64": { - "ignore": true + "args": { + "out": { + "cType": "int64_t *", + "shouldAlloc": true + } + }, + "return": { + "isErrorCode": true + } }, "git_config_get_mapped": { "ignore": true @@ -804,15 +968,6 @@ "git_config_init_backend": { "ignore": true }, - "git_config_iterator_free": { - "ignore": true - }, - "git_config_iterator_glob_new": { - "ignore": true - }, - "git_config_iterator_new": { - "ignore": true - }, "git_config_lock": { "isAsync": true, "args": { @@ -820,20 +975,17 @@ "isReturn": true, "ownedByThis": true } + }, + "return": { + "isErrorCode": true } }, "git_config_lookup_map_value": { "ignore": true }, - "git_config_multivar_iterator_new": { - "ignore": true - }, "git_config_new": { "ignore": true }, - "git_config_next": { - "ignore": true - }, "git_config_open_default": { "isAsync": true, "return": { @@ -887,6 +1039,12 @@ "isErrorCode": true } }, + "git_config_set_multivar": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, "git_config_set_string": { "isAsync": true, "return": { @@ -894,7 +1052,14 @@ } }, "git_config_snapshot": { - "ignore": true + "args": { + "out": { + "ownedByThis": true + } + }, + "return": { + "isErrorCode": true + } } }, "dependencies": 
[ @@ -908,57 +1073,132 @@ "selfFreeing": true }, "config_iterator": { - "ignore": true + "needsForwardDeclaration": false, + "selfFreeing": true, + "fields": { + "backend": { + "ignore": true + }, + "flags": { + "ignore": true + }, + "free": { + "ignore": true + }, + "next": { + "ignore": true + } + }, + "functions": { + "git_config_iterator_free": { + "ignore": true + }, + "git_config_iterator_new": { + "args": { + "out": { + "ownedBy": ["cfg"] + } + }, + "return": { + "isErrorCode": true + } + }, + "git_config_iterator_glob_new": { + "jsFunctionName": "createGlob", + "args": { + "out": { + "ownedBy": ["cfg"] + } + }, + "return": { + "isErrorCode": true + } + }, + "git_config_multivar_iterator_new": { + "jsFunctionName": "createMultivar", + "args": { + "out": { + "ownedBy": ["cfg"] + } + }, + "return": { + "isErrorCode": true + } + }, + "git_config_next": { + "jsFunctionName": "next", + "args": { + "entry": { + "ownedByThis": true + } + }, + "return": { + "isErrorCode": true + } + } + } }, - "cred": { + "config_backend_memory_options": { "selfFreeing": true, - "cType": "git_cred", + "cDependencies": [ + "git2/sys/config.h" + ] + }, + "credential": { + "needsForwardDeclaration": false, + "selfFreeing": true, + "cType": "git_credential", + "fields": { + "free": { + "ignore": true + } + }, "functions": { - "git_cred_default_new": { + "git_credential_default_new": { "isAsync": false }, - "git_cred_free": { + "git_credential_free": { "ignore": true }, - "git_cred_ssh_custom_new": { + "git_credential_ssh_custom_new": { "ignore": true }, - "git_cred_ssh_interactive_new": { + "git_credential_ssh_interactive_new": { "ignore": true }, - "git_cred_ssh_key_from_agent": { + "git_credential_ssh_key_from_agent": { "isAsync": false }, - "git_cred_ssh_key_new": { + "git_credential_ssh_key_new": { "isAsync": false }, - "git_cred_userpass": { + "git_credential_userpass": { "ignore": true }, - "git_cred_userpass_plaintext_new": { + "git_credential_userpass_plaintext_new": { 
"isAsync": false } } }, - "cred_default": { + "credential_default": { "ignore": true }, - "cred_ssh_custom": { + "credential_ssh_custom": { "ignore": true }, - "cred_ssh_interactive": { + "credential_ssh_interactive": { "ignore": true }, - "cred_ssh_key": { + "credential_ssh_key": { "ignore": true }, - "cred_username": { + "credential_username": { "ignore": true }, - "cred_userpass_payload": { + "credential_userpass_payload": { "ignore": true }, - "cred_userpass_plaintext": { + "credential_userpass_plaintext": { "ignore": true }, "describe": { @@ -1023,6 +1263,9 @@ "git_diff_find_init_options": { "ignore": true }, + "git_diff_find_options_init": { + "ignore": true + }, "git_diff_find_similar": { "args": { "diff": { @@ -1048,6 +1291,9 @@ "git_diff_format_email_init_options": { "ignore": true }, + "git_diff_format_email_options_init": { + "ignore": true + }, "git_diff_free": { "ignore": true }, @@ -1121,9 +1367,15 @@ "git_diff_num_deltas_of_type": { "ignore": true }, + "git_diff_options_init": { + "ignore": true + }, "git_diff_patchid_init_options": { "ignore": true }, + "git_diff_patchid_options_init": { + "ignore": true + }, "git_diff_print": { "ignore": true }, @@ -1274,13 +1526,29 @@ } } }, + "email": { + "cDependencies": [ + "git2/sys/email.h" + ] + }, + "email_create_options": { + "hasConstructor": true + }, "fetch": { "functions": { "git_fetch_init_options": { "ignore": true + }, + "git_fetch_options_init": { + "ignore": true } } }, + "fetch_options": { + "dependencies": [ + "../include/str_array_converter.h" + ] + }, "filter": { "selfFreeing": false, "hasConstructor": true, @@ -1288,6 +1556,9 @@ "git2/sys/filter.h" ], "fields": { + "cleanup": { + "ignore": true + }, "stream": { "ignore": true } @@ -1398,9 +1669,13 @@ } }, "dependencies": [ + "../include/git_buf_converter.h", "../include/filter_registry.h" ] }, + "giterr": { + "ignore": true + }, "graph": { "functions": { "git_graph_ahead_behind": { @@ -1424,12 +1699,39 @@ "return": { "isResultOrError": true } 
+ }, + "git_graph_reachable_from_any": { + "args": { + "descendant_array": { + "cType": "git_oid *", + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitOid" + } + }, + "isAsync": true, + "return": { + "isResultOrError": true + } } } }, "hashsig": { "selfFreeing": true, + "freeFunctionName": "git_hashsig_free", "functions": { + "git_hashsig_create": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_hashsig_create_fromfile": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, "git_hashsig_free": { "ignore": true } @@ -1454,6 +1756,12 @@ } } }, + "imaxdiv": { + "ignore": true + }, + "imaxdiv_t": { + "ignore": true + }, "index": { "selfFreeing": true, "ownerFn": { @@ -1493,6 +1801,9 @@ "git_index_add_frombuffer": { "ignore": true }, + "git_index_add_from_buffer": { + "ignore": true + }, "git_index_checksum": { "return": { "ownedByThis": true @@ -1667,63 +1978,290 @@ "isErrorCode": true } }, - "git_index_reuc_get_byindex": { + "git_index_update_all": { + "args": { + "pathspec": { + "isOptional": true + }, + "flags": { + "isOptional": true + }, + "callback": { + "isOptional": true + } + }, + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_write": { + "args": { + "force": { + "isOptional": true + } + }, + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_write_tree": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_write_tree_to": { + "isAsync": true, + "return": { + "isErrorCode": true + } + } + }, + "dependencies": [ + "../include/str_array_converter.h" + ] + }, + "index_conflict_iterator": { + "selfFreeing": true, + "freeFunctionName": "git_index_conflict_iterator_free", + "functions": { + "git_index_conflict_iterator_free": { + "ignore": true + }, + "git_index_conflict_iterator_new": { + "args": { + "iterator_out": { + "ownedBy": ["index"] + } + } + }, + "git_index_conflict_next": { + "isAsync": false, + 
"jsFunctionName": "next", + "cppFunctionName": "Next", + "args": { + "ancestor_out": { + "isReturn": true, + "ownedByThis": true + }, + "our_out": { + "isReturn": true, + "ownedByThis": true + }, + "their_out": { + "isReturn": true, + "ownedByThis": true + } + } + } + } + }, + "index_entry": { + "isReturnable": true, + "hasConstructor": true, + "ignoreInit": true + }, + "index_iterator": { + "selfFreeing": true, + "freeFunctionName": "git_index_iterator_free", + "functions": { + "git_index_iterator_free": { + "ignore": true + }, + "git_index_iterator_new": { + "args": { + "iterator_out": { + "ownedBy": ["index"] + } + } + }, + "git_index_iterator_next": { + "isAsync": false, + "args": { + "out": { + "ownedByThis": true + } + } + } + } + }, + "index_name_entry": { + "functions": { + "git_index_name_add": { + "cppFunctionName": "Add", + "jsFunctionName": "add", + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_name_clear": { + "cppFunctionName": "Clear", + "jsFunctionName": "clear", + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_name_entrycount": { + "cppFunctionName": "Entrycount", + "jsFunctionName": "entryCount" + }, + "git_index_name_get_byindex": { + "cppFunctionName": "GetByIndex", + "jsFunctionName": "getByIndex", + "isPrototypeMethod": false + } + }, + "cDependencies": [ + "git2/sys/index.h" + ] + }, + "index_reuc_entry": { + "fields": { + "mode": { + "cType": "uint32_t [3]" + } + }, + "functions": { + "git_index_reuc_add": { + "cppFunctionName": "Add", + "jsFunctionName": "add", + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_reuc_clear": { + "cppFunctionName": "Clear", + "jsFunctionName": "clear", + "isAsync": true, + "isPrototypeMethod": false, + "return": { + "isErrorCode": true + } + }, + "git_index_reuc_entrycount": { + "cppFunctionName": "Entrycount", + "jsFunctionName": "entryCount" + }, + "git_index_reuc_find": { + "args": { + "at_pos": { + "isReturn": true, + 
"shouldAlloc": true + } + }, + "cppFunctionName": "Find", + "jsFunctionName": "find", + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_index_reuc_get_byindex": { + "cppFunctionName": "GetByIndex", + "jsFunctionName": "getByIndex", + "isPrototypeMethod": false + }, + "git_index_reuc_get_bypath": { + "cppFunctionName": "GetByPath", + "jsFunctionName": "getByPath", + "isPrototypeMethod": false + }, + "git_index_reuc_remove": { + "cppFunctionName": "Remove", + "jsFunctionName": "remove", + "isAsync": true, + "isPrototypeMethod": false, + "return": { + "isErrorCode": true + } + } + }, + "cDependencies": [ + "git2/sys/index.h" + ] + }, + "index_time": { + "isReturnable": true, + "hasConstructor": true, + "ignoreInit": true + }, + "indexer": { + "ignore": true + }, + "indexer_options": { + "ignore": true + }, + "LIBSSH2_SESSION": { + "ignore": true + }, + "LIBSSH2_USERAUTH_KBDINT_PROMPT": { + "ignore": true + }, + "LIBSSH2_USERAUTH_KBDINT_RESPONSE": { + "ignore": true + }, + "_LIBSSH2_SESSION": { + "ignore": true + }, + "_LIBSSH2_USERAUTH_KBDINT_PROMPT": { + "ignore": true + }, + "_LIBSSH2_USERAUTH_KBDINT_RESPONSE": { + "ignore": true + }, + "mailmap": { + "selfFreeing": true, + "freeFunctionName": "git_mailmap_free", + "functions": { + "git_mailmap_add_entry": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_mailmap_free": { "ignore": true }, - "git_index_reuc_get_bypath": { - "ignore": true + "git_mailmap_from_buffer": { + "return": { + "isErrorCode": true + } }, - "git_index_update_all": { + "git_mailmap_from_repository": { "args": { - "pathspec": { - "isOptional": true - }, - "flags": { - "isOptional": true - }, - "callback": { - "isOptional": true + "out": { + "ownedBy": ["repo"] } }, - "isAsync": true, - "return": { - "isErrorCode": true - } - }, - "git_index_write": { - "args": { - "force": { - "isOptional": true - } - }, - "isAsync": true, "return": { "isErrorCode": true } }, - "git_index_write_tree": { + 
"git_mailmap_resolve": { + "args": { + "real_name": { + "isReturn": true + }, + "real_email": { + "isReturn": true + } + }, "isAsync": true, "return": { "isErrorCode": true } }, - "git_index_write_tree_to": { - "isAsync": true, + "git_mailmap_resolve_signature": { "return": { "isErrorCode": true } } - }, - "dependencies": [ - "../include/str_array_converter.h" - ] - }, - "index_entry": { - "hasConstructor": true, - "ignoreInit": true - }, - "indexer": { - "ignore": true + } }, "mempack": { "ignore": true @@ -1750,10 +2288,12 @@ "jsClassName": "Number" }, "merge_opts": { + "isOptional": true, "cType": "git_merge_options *", "cppClassName": "GitMergeOptions" }, "checkout_opts": { + "isOptional": true, "cType": "git_checkout_options *", "cppClassName": "GitCheckoutOptions" } @@ -1763,7 +2303,58 @@ } }, "git_merge_analysis": { - "ignore": true + "isAsync": true, + "args": { + "analysis_out": { + "isReturn": true, + "shouldAlloc": true + }, + "preference_out": { + "isReturn": true, + "shouldAlloc": true + }, + "their_heads": { + "cType": "const git_annotated_commit **", + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitAnnotatedCommit" + }, + "their_heads_len": { + "cType": "size_t", + "cppClassName": "Number", + "jsClassName": "Number" + } + }, + "return": { + "isErrorCode": true + } + }, + "git_merge_analysis_for_ref": { + "isAsync": true, + "args": { + "analysis_out": { + "isReturn": true, + "shouldAlloc": true + }, + "preference_out": { + "isReturn": true, + "shouldAlloc": true + }, + "their_heads": { + "cType": "const git_annotated_commit **", + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitAnnotatedCommit" + }, + "their_heads_len": { + "cType": "size_t", + "cppClassName": "Number", + "jsClassName": "Number" + } + }, + "return": { + "isErrorCode": true + } }, "git_merge_base_many": { "ignore": true @@ -1787,12 +2378,24 @@ "git_merge_file_from_index": { "ignore": true }, + 
"git_merge_file_input_init": { + "ignore": true + }, + "git_merge_file_init_input": { + "ignore": true + }, "git_merge_file_init_options": { "ignore": true }, + "git_merge_file_options_init": { + "ignore": true + }, "git_merge_init_options": { "ignore": true }, + "git_merge_options_init": { + "ignore": true + }, "git_merge_trees": { "args": { "ancestor_tree": { @@ -1852,6 +2455,9 @@ "notes_commit_out": { "isReturn": true } + }, + "return": { + "isErrorCode": true } }, "git_note_comitter": { @@ -1931,16 +2537,10 @@ "ignore": true }, "git_odb_add_disk_alternate": { - "ignore": true - }, - "git_odb_backend_loose": { - "ignore": true - }, - "git_odb_backend_one_pack": { - "ignore": true - }, - "git_odb_backend_pack": { - "ignore": true + "isAsync": true, + "return": { + "isErrorCode": true + } }, "git_odb_exists": { "ignore": true, @@ -2007,9 +2607,11 @@ "ignore": true }, "git_odb_read": { + "isAsync": true, "cppFunctionName": "OdbRead", "args": { - "out": { + "obj": { + "isReturn": true, "ownedByThis": true } } @@ -2094,10 +2696,17 @@ "shouldAlloc": true, "functions": { "git_oid_cpy": { + "isAsync": false, "args": { + "src": { + "shouldAlloc": false + }, "out": { "isReturn": true } + }, + "return": { + "isErrorCode": true } }, "git_oid_fmt": { @@ -2107,13 +2716,25 @@ "ignore": true }, "git_oid_fromstr": { - "isAsync": false + "ignore": true }, "git_oid_fromstrn": { "ignore": true }, "git_oid_fromstrp": { - "ignore": true + "isAsync": false, + "jsFunctionName": "fromString", + "args": { + "out": { + "isReturn": true + }, + "str": { + "shouldAlloc": false + } + }, + "return": { + "isErrorCode": true + } }, "git_oid_nfmt": { "ignore": true @@ -2158,9 +2779,7 @@ } }, "openssl": { - "cDependencies": [ - "git2/sys/openssl.h" - ] + "ignore": true }, "packbuilder": { "selfFreeing": true, @@ -2219,10 +2838,37 @@ "dependencies": [ "../include/convenient_patch.h" ], + "ownerFn": { + "name": "git_patch_owner", + "singletonCppClassName": "GitRepository" + }, "functions": { 
"git_patch_free": { "ignore": true }, + "git_patch_from_blobs": { + "isAsync": true, + "args": { + "out": { + "isReturn": true + }, + "old_blob": { + "isOptional": true + }, + "old_as_path": { + "isOptional": true + }, + "new_blob": { + "isOptional": true + }, + "new_as_path": { + "isOptional": true + }, + "opts": { + "isOptional": true + } + } + }, "git_patch_from_blob_and_buffer": { "ignore": true }, @@ -2231,8 +2877,10 @@ }, "git_patch_from_diff": { "isAsync": true, - "return": { - "ownedBy": ["diff"] + "args": { + "out": { + "ownedBy": ["diff"] + } } }, "git_patch_get_delta": { @@ -2292,6 +2940,11 @@ } } }, + "path": { + "cDependencies": [ + "git2/sys/path.h" + ] + }, "pathspec": { "selfFreeing": true, "dependencies": [ @@ -2359,13 +3012,22 @@ "functions": { "git_proxy_init_options": { "ignore": true + }, + "git_proxy_options_init": { + "ignore": true } } }, "push": { "ignore": true }, + "push_options": { + "dependencies": [ + "../include/str_array_converter.h" + ] + }, "rebase": { + "hasConstructor": false, "selfFreeing": true, "functions": { "git_rebase_abort": { @@ -2421,6 +3083,7 @@ "git_rebase_init": { "args": { "out": { + "isSelf": true, "ownedBy": ["repo"] }, "upstream": { @@ -2433,6 +3096,7 @@ "isOptional": true }, "opts": { + "preserveOnThis": true, "isOptional": true } } @@ -2448,11 +3112,15 @@ } }, "git_rebase_next": { + "isAsync": true, "args": { "operation": { "isReturn": true, "ownedByThis": true } + }, + "return": { + "isErrorCode": true } }, "git_rebase_open": { @@ -2460,6 +3128,10 @@ "out": { "isSelf": true, "ownedBy": ["repo"] + }, + "opts": { + "isOptional": true, + "preserveOnThis": true } } }, @@ -2472,6 +3144,60 @@ "return": { "ownedByThis": true } + }, + "git_rebase_options_init": { + "ignore": true + } + } + }, + "rebase_options": { + "fields": { + "signing_cb": { + "ignore": true + }, + "commit_create_cb": { + "args": [ + { + "name": "out", + "cType": "git_oid *" + }, + { + "name": "author", + "cType": "const git_signature *" + }, + { + 
"name": "committer", + "cType": "const git_signature *" + }, + { + "name": "message_encoding", + "cType": "const char *" + }, + { + "name": "message", + "cType": "const char *" + }, + { + "name": "tree", + "cType": "const git_tree *" + }, + { + "name": "parent_count", + "cType": "size_t" + }, + { + "name": "parents", + "cType": "const git_oid **", + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitOid", + "arrayLengthArgumentName": "parent_count" + }, + { + "name": "payload", + "cType": "void *" + } + ] } } }, @@ -2686,19 +3412,57 @@ } } }, + "git_remote_create_init_options": { + "ignore": true + }, + "git_remote_create_options_init": { + "ignore": true + }, + "git_remote_create_with_opts": { + "args": { + "opts": { + "isOptional": true + } + } + }, "git_remote_connect": { "isAsync": true, + "args": { + "callbacks": { + "isOptional": true, + "preserveOnThis": true + }, + "proxy_opts": { + "isOptional": true, + "preserveOnThis": true + }, + "custom_headers": { + "isOptional": true + } + }, "return": { "isErrorCode": true } }, - "git_remote_disconnect": { + "git_remote_connect_ext": { "isAsync": true }, + "git_remote_connect_options_init": { + "ignore": true + }, + "git_remote_disconnect": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, "git_remote_download": { "args": { "refspecs": { "isOptional": true + }, + "opts": { + "isOptional": true } }, "isAsync": true, @@ -2730,6 +3494,9 @@ }, "git_remote_fetch": { "args": { + "opts": { + "isOptional": true + }, "reflog_message": { "isOptional": true }, @@ -2779,15 +3546,32 @@ }, "git_remote_get_refspec": { "return": { + "selfFreeing": false, "ownedByThis": true } }, "git_remote_init_callbacks": { "ignore": true }, - "git_remote_list": { + "git_remote_list": { + "args": { + "out": { + "isReturn": true, + "shouldAlloc": true, + "cppClassName": "Array", + "jsClassName": "Array", + "size": "count", + "key": "strings" + } + } + }, + "git_remote_ls": { + "ignore": true + }, + 
"git_remote_rename": { + "isAsync": true, "args": { - "out": { + "problems": { "isReturn": true, "shouldAlloc": true, "cppClassName": "Array", @@ -2795,13 +3579,21 @@ "size": "count", "key": "strings" } + }, + "return": { + "isErrorCode": true } }, - "git_remote_ls": { - "ignore": true - }, - "git_remote_rename": { - "ignore": true + "git_remote_prune": { + "args": { + "callbacks": { + "isOptional": true + } + }, + "isAsync": true, + "return": { + "isErrorCode": true + } }, "git_remote_push": { "isAsync": true, @@ -2809,6 +3601,9 @@ "isErrorCode": true }, "args": { + "refspecs": { + "isOptional": true + }, "opts": { "isOptional": true } @@ -2827,9 +3622,36 @@ "return": { "ownedByThis": true } + }, + "git_remote_update_tips": { + "isAsync": true, + "args": { + "reflog_message": { + "isOptional": true + } + }, + "return": { + "isErrorCode": true + } + }, + "git_remote_upload": { + "args": { + "refspecs": { + "isOptional": true + } + }, + "isAsync": true, + "return": { + "isErrorCode": true + } } } }, + "remote_connect_options": { + "dependencies": [ + "../include/str_array_converter.h" + ] + }, "remote_callbacks": { "fields": { "completion": { @@ -2842,7 +3664,7 @@ "ignore": true }, "sideband_progress": { - "ignore": true + "ignore": false }, "update_tips": { "ignore": true @@ -2857,12 +3679,40 @@ "selfFreeing": true }, "repository": { + "hasConstructor": false, "selfFreeing": true, "isSingleton": true, "dependencies": [ - "git2/sys/repository.h" + "git2/sys/repository.h", + "../include/commit.h", + "../include/submodule.h", + "../include/remote.h" ], "functions": { + "git_repository__cleanup": { + "isAsync": true, + "return": { + "isErrorCode": true + } + }, + "git_repository_commit_parents": { + "isAsync": true, + "args": { + "commits": { + "shouldAlloc": true, + "selfFreeing": true, + "isReturn": true, + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitCommit", + "size": "count", + "key": "commits" + } + }, + "return": { + 
"isErrorCode": true + } + }, "git_repository_config": { "args": { "out": { @@ -2879,6 +3729,9 @@ "isErrorCode": true }, "args": { + "ceiling_dirs": { + "isOptional": true + }, "out": { "isReturn": true, "isSelf": false, @@ -2899,9 +3752,17 @@ "ignore": true }, "git_repository_ident": { - "ignore": true + "args": { + "name": { + "isReturn": true + }, + "email": { + "isReturn": true + } + }, + "isAsync": false }, - "git_repository_init_init_options": { + "git_repository_init_options_init": { "ignore": true }, "git_repository_mergehead_foreach": { @@ -2964,6 +3825,9 @@ "index": { "isOptional": true } + }, + "return": { + "isErrorCode": true } }, "git_repository_set_odb": { @@ -2971,6 +3835,19 @@ }, "git_repository_set_refdb": { "ignore": true + }, + "git_repository_statistics": { + "isAsync": true + }, + "git_repository_submodule_cache_all": { + "return": { + "isErrorCode": true + } + }, + "git_repository_submodule_cache_clear": { + "return": { + "isErrorCode": true + } } } }, @@ -2997,6 +3874,9 @@ }, "git_revert_init_options": { "ignore": true + }, + "git_revert_options_init": { + "ignore": true } } }, @@ -3013,6 +3893,7 @@ ], "functions": { "git_reset": { + "isCollectionRoot": true, "args": { "checkout_opts": { "isOptional": true @@ -3053,9 +3934,6 @@ } } }, - "revspec": { - "ignore": true - }, "revwalk": { "selfFreeing": true, "ownerFn": { @@ -3083,7 +3961,10 @@ "dupFunction": "git_signature_dup", "functions": { "git_signature_default": { - "isAsync": false + "isAsync": true, + "return": { + "isErrorCode": true + } }, "git_signature_dup": { "ignore": true @@ -3095,7 +3976,15 @@ "isAsync": false }, "git_signature_now": { - "isAsync": false + "isAsync": false, + "args": { + "sig_out": { + "isReturn": true + } + }, + "return": { + "isErrorCode": true + } } } }, @@ -3112,17 +4001,37 @@ } } }, + "smart_subtransport": { + "ignore": true + }, "smart_subtransport_definition": { "ignore": true }, + "smart_subtransport_stream": { + "ignore": true + }, "stash": { "functions": 
{ "git_stash_apply": { + "args": { + "options": { + "isOptional": true + } + }, "isAsync": true, "return": { "isErrorCode": true } }, + "git_stash_apply_init_options": { + "ignore": true + }, + "git_stash_apply_options_init": { + "ignore": true + }, + "git_stash_save_options_init": { + "ignore": true + }, "git_stash_drop": { "isAsync": true, "return": { @@ -3135,10 +4044,12 @@ "isErrorCode": true } }, - "git_stash_apply_init_options": { - "ignore": true - }, "git_stash_pop": { + "args": { + "options": { + "isOptional": true + } + }, "isAsync": true, "return": { "isErrorCode": true @@ -3157,6 +4068,14 @@ } } }, + "stash_save_options": { + "dependencies": [ + "../include/str_array_converter.h" + ] + }, + "stdalloc": { + "ignore": true + }, "status": { "cDependencies": [ "git2/sys/diff.h" @@ -3194,6 +4113,9 @@ }, "git_status_init_options": { "ignore": true + }, + "git_status_options_init": { + "ignore": true } } }, @@ -3203,6 +4125,21 @@ "git_status_list_free": { "ignore": true }, + "git_status_list_get_perfdata": { + "isAsync": false, + "args": { + "out": { + "isReturn": true, + "shouldAlloc": true + }, + "status": { + "isSelf": true + } + }, + "return": { + "isErrorCode": true + } + }, "git_status_list_new": { "isAsync": true, "args": { @@ -3221,7 +4158,11 @@ }, "strarray": { "selfFreeing": true, + "freeFunctionName": "git_strarray_dispose", "functions": { + "git_strarray_dispose": { + "ignore": true + }, "git_strarray_free": { "ignore": true } @@ -3236,7 +4177,11 @@ "git2/sys/stream.h" ] }, + "stream_registration": { + "ignore": true + }, "submodule": { + "hasConstructor": false, "selfFreeing": true, "ownerFn": { "name": "git_submodule_owner", @@ -3384,6 +4329,9 @@ }, "git_submodule_update_init_options": { "ignore": true + }, + "git_submodule_update_options_init": { + "ignore": true } } }, @@ -3419,8 +4367,16 @@ }, "isAsync": true }, - "git_tag_create_frombuffer": { - "ignore": true + "git_tag_create_from_buffer": { + "args": { + "oid": { + "isReturn": true + } + 
}, + "return": { + "isErrorCode": true + }, + "isAsync": true }, "git_tag_create_lightweight": { "args": { @@ -3518,14 +4474,6 @@ }, "time": { "dupFunction": "git_time_dup", - "dependencies": [ - "git2/sys/time.h" - ], - "fields": { - "sign": { - "ignore": true - } - }, "functions": { "git_time_sign": { "ignore": true @@ -3560,6 +4508,47 @@ "transport": { "cType": "git_transport", "needsForwardDeclaration": false, + "fields": { + "cancel": { + "ignore": true + }, + "close": { + "ignore": true + }, + "connect": { + "ignore": true + }, + "download_pack": { + "ignore": true + }, + "free": { + "ignore": true + }, + "is_connected": { + "ignore": true + }, + "ls": { + "ignore": true + }, + "negotiate_fetch": { + "ignore": true + }, + "push": { + "ignore": true + }, + "read_flags": { + "ignore": true + }, + "set_callbacks": { + "ignore": true + }, + "set_custom_headers": { + "ignore": true + }, + "version": { + "ignore": true + } + }, "functions": { "git_transport_dummy": { "ignore": true @@ -3597,6 +4586,16 @@ "singletonCppClassName": "GitRepository" }, "functions": { + "git_tree_create_updated": { + "args": { + "updates": { + "cType": "git_tree_update *", + "cppClassName": "Array", + "jsClassName": "Array", + "arrayElementCppClassName": "GitTreeUpdate" + } + } + }, "git_tree_entry_byid": { "return": { "ownedByThis": true @@ -3647,6 +4646,11 @@ "treebuilder": { "selfFreeing": true, "functions": { + "git_treebuilder_clear": { + "return": { + "isErrorCode": true + } + }, "git_treebuilder_filter": { "ignore": true }, @@ -3722,6 +4726,9 @@ } } }, + "win32": { + "ignore": true + }, "worktree": { "selfFreeing": true, "cType": "git_worktree", @@ -3737,9 +4744,20 @@ "git_worktree_add_init_options": { "ignore": true }, + "git_worktree_add_options_init": { + "ignore": true + }, "git_worktree_free": { "ignore": true }, + "git_worktree_is_prunable": { + "args": { + "opts": { + "isOptional": true + } + }, + "isAsync": true + }, "git_worktree_lookup": { "args": { "out": { @@ -3756,12 
+4774,44 @@ }, "git_worktree_prune_init_options": { "ignore": true + }, + "git_worktree_prune_options_init": { + "ignore": true + }, + "git_worktree_prune": { + "args": { + "opts": { + "isOptional": true + } + }, + "isAsync": true, + "return": { + "isErrorCode": true + } } - } + }, + "dependencies": [ + "../include/git_buf_converter.h" + ] + }, + "tree_update": { + "hasConstructor": true, + "ignoreInit": true }, "writestream": { "cType": "git_writestream", - "needsForwardDeclaration": false + "needsForwardDeclaration": false, + "fields": { + "close": { + "ignore": true + }, + "free": { + "ignore": true + }, + "write": { + "ignore": true + } + } } } } diff --git a/generate/input/libgit2-docs.json b/generate/input/libgit2-docs.json index 75f9a46af2..e1969402e7 100644 --- a/generate/input/libgit2-docs.json +++ b/generate/input/libgit2-docs.json @@ -1,47 +1,68 @@ { "files": [ { - "file": "annotated_commit.h", + "file": "git2/annotated_commit.h", "functions": [ "git_annotated_commit_from_ref", "git_annotated_commit_from_fetchhead", "git_annotated_commit_lookup", "git_annotated_commit_from_revspec", "git_annotated_commit_id", + "git_annotated_commit_ref", "git_annotated_commit_free" ], "meta": {}, - "lines": 112 + "lines": 128 }, { - "file": "attr.h", + "file": "git2/apply.h", + "functions": [ + "git_apply_delta_cb", + "git_apply_hunk_cb", + "git_apply_options_init", + "git_apply_to_tree", + "git_apply" + ], + "meta": {}, + "lines": 182 + }, + { + "file": "git2/attr.h", "functions": [ "git_attr_value", "git_attr_get", + "git_attr_get_ext", "git_attr_get_many", + "git_attr_get_many_ext", + "git_attr_foreach_cb", "git_attr_foreach", + "git_attr_foreach_ext", "git_attr_cache_flush", "git_attr_add_macro" ], "meta": {}, - "lines": 240 + "lines": 378 }, { - "file": "blame.h", + "file": "git2/blame.h", "functions": [ - "git_blame_init_options", + "git_blame_options_init", + "git_blame_linecount", + "git_blame_hunkcount", + "git_blame_hunk_byindex", + "git_blame_hunk_byline", + 
"git_blame_line_byindex", "git_blame_get_hunk_count", "git_blame_get_hunk_byindex", "git_blame_get_hunk_byline", - "git_blame_file", "git_blame_buffer", "git_blame_free" ], "meta": {}, - "lines": 207 + "lines": 385 }, { - "file": "blob.h", + "file": "git2/blob.h", "functions": [ "git_blob_lookup", "git_blob_lookup_prefix", @@ -50,20 +71,22 @@ "git_blob_owner", "git_blob_rawcontent", "git_blob_rawsize", - "git_blob_filtered_content", - "git_blob_create_fromworkdir", - "git_blob_create_fromdisk", - "git_blob_create_fromstream", - "git_blob_create_fromstream_commit", - "git_blob_create_frombuffer", + "git_blob_filter_options_init", + "git_blob_filter", + "git_blob_create_from_workdir", + "git_blob_create_from_disk", + "git_blob_create_from_stream", + "git_blob_create_from_stream_commit", + "git_blob_create_from_buffer", "git_blob_is_binary", + "git_blob_data_is_binary", "git_blob_dup" ], "meta": {}, - "lines": 228 + "lines": 350 }, { - "file": "branch.h", + "file": "git2/branch.h", "functions": [ "git_branch_create", "git_branch_create_from_annotated", @@ -76,61 +99,66 @@ "git_branch_name", "git_branch_upstream", "git_branch_set_upstream", + "git_branch_upstream_name", "git_branch_is_head", - "git_branch_is_checked_out" + "git_branch_is_checked_out", + "git_branch_remote_name", + "git_branch_upstream_remote", + "git_branch_upstream_merge", + "git_branch_name_is_valid" ], "meta": {}, - "lines": 258 + "lines": 339 }, { - "file": "buffer.h", - "functions": [ - "git_buf_free", - "git_buf_grow", - "git_buf_set", - "git_buf_is_binary", - "git_buf_contains_nul" - ], + "file": "git2/buffer.h", + "functions": ["git_buf_dispose"], + "meta": {}, + "lines": 71 + }, + { + "file": "git2/cert.h", + "functions": ["git_transport_certificate_check_cb"], "meta": {}, - "lines": 122 + "lines": 168 }, { - "file": "checkout.h", + "file": "git2/checkout.h", "functions": [ "git_checkout_notify_cb", "git_checkout_progress_cb", "git_checkout_perfdata_cb", - "git_checkout_init_options", + 
"git_checkout_options_init", "git_checkout_head", "git_checkout_index", "git_checkout_tree" ], "meta": {}, - "lines": 361 + "lines": 463 }, { - "file": "cherrypick.h", + "file": "git2/cherrypick.h", "functions": [ - "git_cherrypick_init_options", + "git_cherrypick_options_init", "git_cherrypick_commit", "git_cherrypick" ], "meta": {}, - "lines": 84 + "lines": 94 }, { - "file": "clone.h", + "file": "git2/clone.h", "functions": [ "git_remote_create_cb", "git_repository_create_cb", - "git_clone_init_options", + "git_clone_options_init", "git_clone" ], "meta": {}, - "lines": 203 + "lines": 220 }, { - "file": "commit.h", + "file": "git2/commit.h", "functions": [ "git_commit_lookup", "git_commit_lookup_prefix", @@ -146,6 +174,8 @@ "git_commit_time_offset", "git_commit_committer", "git_commit_author", + "git_commit_committer_with_mailmap", + "git_commit_author_with_mailmap", "git_commit_raw_header", "git_commit_tree", "git_commit_tree_id", @@ -157,28 +187,34 @@ "git_commit_extract_signature", "git_commit_create", "git_commit_create_v", + "git_commit_create_from_stage", "git_commit_amend", "git_commit_create_buffer", "git_commit_create_with_signature", - "git_commit_dup" + "git_commit_dup", + "git_commit_create_cb", + "git_commitarray_dispose" ], "meta": {}, - "lines": 474 + "lines": 670 }, { - "file": "common.h", + "file": "git2/common.h", "functions": [ "git_libgit2_version", + "git_libgit2_prerelease", "git_libgit2_features", + "git_libgit2_feature_backend", "git_libgit2_opts" ], "meta": {}, - "lines": 352 + "lines": 569 }, { - "file": "config.h", + "file": "git2/config.h", "functions": [ "git_config_entry_free", + "git_config_foreach_cb", "git_config_find_global", "git_config_find_xdg", "git_config_find_system", @@ -189,6 +225,7 @@ "git_config_open_ondisk", "git_config_open_level", "git_config_open_global", + "git_config_set_writeorder", "git_config_snapshot", "git_config_free", "git_config_get_entry", @@ -223,38 +260,91 @@ "git_config_lock" ], "meta": {}, - "lines": 
751 + "lines": 847 + }, + { + "file": "git2/credential.h", + "functions": [ + "git_credential_acquire_cb", + "git_credential_free", + "git_credential_has_username", + "git_credential_get_username", + "git_credential_userpass_plaintext_new", + "git_credential_default_new", + "git_credential_username_new", + "git_credential_ssh_key_new", + "git_credential_ssh_key_memory_new", + "git_credential_ssh_interactive_cb", + "git_credential_ssh_interactive_new", + "git_credential_ssh_key_from_agent", + "git_credential_sign_cb", + "git_credential_ssh_custom_new" + ], + "meta": {}, + "lines": 338 + }, + { + "file": "git2/credential_helpers.h", + "functions": ["git_credential_userpass"], + "meta": {}, + "lines": 49 }, { - "file": "cred_helpers.h", + "file": "git2/deprecated.h", "functions": [ - "git_cred_userpass" + "git_blob_filtered_content", + "git_filter_list_stream_data", + "git_filter_list_apply_to_data", + "git_treebuilder_write_with_buffer", + "git_buf_grow", + "git_buf_set", + "git_buf_is_binary", + "git_buf_contains_nul", + "git_buf_free", + "git_commit_signing_cb", + "git_diff_format_email", + "git_diff_commit_as_email", + "git_diff_format_email_options_init", + "giterr_last", + "giterr_clear", + "giterr_set_str", + "giterr_set_oom", + "git_object__size", + "git_remote_is_valid_name", + "git_reference_is_valid_name", + "git_oidarray_free", + "git_headlist_cb", + "git_strarray_copy", + "git_strarray_free", + "git_blame_init_options" ], "meta": {}, - "lines": 48 + "lines": 1035 }, { - "file": "describe.h", + "file": "git2/describe.h", "functions": [ + "git_describe_options_init", + "git_describe_format_options_init", "git_describe_commit", "git_describe_workdir", "git_describe_format", "git_describe_result_free" ], "meta": {}, - "lines": 161 + "lines": 201 }, { - "file": "diff.h", + "file": "git2/diff.h", "functions": [ "git_diff_notify_cb", "git_diff_progress_cb", - "git_diff_init_options", + "git_diff_options_init", "git_diff_file_cb", "git_diff_binary_cb", 
"git_diff_hunk_cb", "git_diff_line_cb", - "git_diff_find_init_options", + "git_diff_find_options_init", "git_diff_free", "git_diff_tree_to_tree", "git_diff_tree_to_index", @@ -282,72 +372,69 @@ "git_diff_stats_deletions", "git_diff_stats_to_buf", "git_diff_stats_free", - "git_diff_format_email", - "git_diff_commit_as_email", - "git_diff_format_email_init_options", - "git_diff_patchid_init_options", + "git_diff_patchid_options_init", "git_diff_patchid" ], "meta": {}, - "lines": 1452 + "lines": 1502 }, { - "file": "errors.h", - "functions": [ - "giterr_last", - "giterr_clear", - "giterr_set_str", - "giterr_set_oom" - ], + "file": "git2/email.h", + "functions": ["git_email_create_from_commit"], + "meta": {}, + "lines": 102 + }, + { + "file": "git2/errors.h", + "functions": ["git_error_last"], "meta": {}, "lines": 149 }, { - "file": "filter.h", + "file": "git2/filter.h", "functions": [ "git_filter_list_load", + "git_filter_list_load_ext", "git_filter_list_contains", - "git_filter_list_apply_to_data", + "git_filter_list_apply_to_buffer", "git_filter_list_apply_to_file", "git_filter_list_apply_to_blob", - "git_filter_list_stream_data", + "git_filter_list_stream_buffer", "git_filter_list_stream_file", "git_filter_list_stream_blob", "git_filter_list_free" ], "meta": {}, - "lines": 210 + "lines": 278 }, { - "file": "global.h", - "functions": [ - "git_libgit2_init", - "git_libgit2_shutdown" - ], + "file": "git2/global.h", + "functions": ["git_libgit2_init", "git_libgit2_shutdown"], "meta": {}, - "lines": 39 + "lines": 45 }, { - "file": "graph.h", + "file": "git2/graph.h", "functions": [ "git_graph_ahead_behind", - "git_graph_descendant_of" + "git_graph_descendant_of", + "git_graph_reachable_from_any" ], "meta": {}, - "lines": 54 + "lines": 73 }, { - "file": "ignore.h", + "file": "git2/ignore.h", "functions": [ "git_ignore_add_rule", "git_ignore_clear_internal_rules", "git_ignore_path_is_ignored" ], "meta": {}, - "lines": 74 + "lines": 83 }, { - "file": "index.h", + "file": 
"git2/index.h", "functions": [ "git_index_matched_path_cb", "git_index_open", @@ -374,8 +461,11 @@ "git_index_add", "git_index_entry_stage", "git_index_entry_is_conflict", + "git_index_iterator_new", + "git_index_iterator_next", + "git_index_iterator_free", "git_index_add_bypath", - "git_index_add_frombuffer", + "git_index_add_from_buffer", "git_index_remove_bypath", "git_index_add_all", "git_index_remove_all", @@ -392,27 +482,45 @@ "git_index_conflict_iterator_free" ], "meta": {}, - "lines": 806 + "lines": 928 }, { - "file": "indexer.h", + "file": "git2/indexer.h", "functions": [ + "git_indexer_progress_cb", + "git_indexer_options_init", "git_indexer_new", "git_indexer_append", "git_indexer_commit", "git_indexer_hash", + "git_indexer_name", "git_indexer_free" ], "meta": {}, - "lines": 72 + "lines": 207 + }, + { + "file": "git2/mailmap.h", + "functions": [ + "git_mailmap_new", + "git_mailmap_free", + "git_mailmap_add_entry", + "git_mailmap_from_buffer", + "git_mailmap_from_repository", + "git_mailmap_resolve", + "git_mailmap_resolve_signature" + ], + "meta": {}, + "lines": 116 }, { - "file": "merge.h", + "file": "git2/merge.h", "functions": [ - "git_merge_file_init_input", - "git_merge_file_init_options", - "git_merge_init_options", + "git_merge_file_input_init", + "git_merge_file_options_init", + "git_merge_options_init", "git_merge_analysis", + "git_merge_analysis_for_ref", "git_merge_base", "git_merge_bases", "git_merge_base_many", @@ -426,52 +534,31 @@ "git_merge" ], "meta": {}, - "lines": 585 + "lines": 666 }, { - "file": "message.h", + "file": "git2/message.h", "functions": [ "git_message_prettify", "git_message_trailers", "git_message_trailer_array_free" ], "meta": {}, - "lines": 79 + "lines": 81 }, + { "file": "git2/net.h", "functions": [], "meta": {}, "lines": 51 }, { - "file": "net.h", - "functions": [ - "git_headlist_cb" - ], - "meta": {}, - "lines": 55 - }, - { - "file": "notes.h", + "file": "git2/notes.h", "functions": [ "git_note_foreach_cb", - 
"git_note_iterator_new", - "git_note_commit_iterator_new", "git_note_iterator_free", - "git_note_next", - "git_note_read", - "git_note_commit_read", - "git_note_author", - "git_note_committer", - "git_note_message", - "git_note_id", - "git_note_create", - "git_note_commit_create", - "git_note_remove", - "git_note_commit_remove", - "git_note_free", - "git_note_foreach" + "git_note_next" ], "meta": {}, - "lines": 302 + "lines": 91 }, { - "file": "object.h", + "file": "git2/object.h", "functions": [ "git_object_lookup", "git_object_lookup_prefix", @@ -484,15 +571,15 @@ "git_object_type2string", "git_object_string2type", "git_object_typeisloose", - "git_object__size", "git_object_peel", - "git_object_dup" + "git_object_dup", + "git_object_rawcontent_is_valid" ], "meta": {}, - "lines": 237 + "lines": 274 }, { - "file": "odb.h", + "file": "git2/odb.h", "functions": [ "git_odb_foreach_cb", "git_odb_new", @@ -503,6 +590,7 @@ "git_odb_read_prefix", "git_odb_read_header", "git_odb_exists", + "git_odb_exists_ext", "git_odb_exists_prefix", "git_odb_expand_ids", "git_odb_refresh", @@ -515,6 +603,7 @@ "git_odb_stream_free", "git_odb_open_rstream", "git_odb_write_pack", + "git_odb_write_multi_pack_index", "git_odb_hash", "git_odb_hashfile", "git_odb_object_dup", @@ -526,23 +615,24 @@ "git_odb_add_backend", "git_odb_add_alternate", "git_odb_num_backends", - "git_odb_get_backend" + "git_odb_get_backend", + "git_odb_set_commit_graph" ], "meta": {}, - "lines": 544 + "lines": 691 }, { - "file": "odb_backend.h", + "file": "git2/odb_backend.h", "functions": [ "git_odb_backend_pack", - "git_odb_backend_loose", - "git_odb_backend_one_pack" + "git_odb_backend_one_pack", + "git_odb_backend_loose" ], "meta": {}, - "lines": 130 + "lines": 246 }, { - "file": "oid.h", + "file": "git2/oid.h", "functions": [ "git_oid_fromstr", "git_oid_fromstrp", @@ -559,24 +649,22 @@ "git_oid_ncmp", "git_oid_streq", "git_oid_strcmp", - "git_oid_iszero", + "git_oid_is_zero", "git_oid_shorten_new", 
"git_oid_shorten_add", "git_oid_shorten_free" ], "meta": {}, - "lines": 264 + "lines": 366 }, { - "file": "oidarray.h", - "functions": [ - "git_oidarray_free" - ], + "file": "git2/oidarray.h", + "functions": ["git_oidarray_dispose"], "meta": {}, - "lines": 34 + "lines": 38 }, { - "file": "pack.h", + "file": "git2/pack.h", "functions": [ "git_packbuilder_new", "git_packbuilder_set_threads", @@ -585,8 +673,11 @@ "git_packbuilder_insert_commit", "git_packbuilder_insert_walk", "git_packbuilder_insert_recur", + "git_packbuilder_write_buf", "git_packbuilder_write", "git_packbuilder_hash", + "git_packbuilder_name", + "git_packbuilder_foreach_cb", "git_packbuilder_foreach", "git_packbuilder_object_count", "git_packbuilder_written", @@ -595,11 +686,12 @@ "git_packbuilder_free" ], "meta": {}, - "lines": 236 + "lines": 274 }, { - "file": "patch.h", + "file": "git2/patch.h", "functions": [ + "git_patch_owner", "git_patch_from_diff", "git_patch_from_blobs", "git_patch_from_blob_and_buffer", @@ -616,10 +708,10 @@ "git_patch_to_buf" ], "meta": {}, - "lines": 268 + "lines": 284 }, { - "file": "pathspec.h", + "file": "git2/pathspec.h", "functions": [ "git_pathspec_new", "git_pathspec_free", @@ -636,22 +728,24 @@ "git_pathspec_match_list_failed_entry" ], "meta": {}, - "lines": 277 + "lines": 284 }, { - "file": "proxy.h", - "functions": [ - "git_proxy_init_options" - ], + "file": "git2/proxy.h", + "functions": ["git_proxy_options_init"], "meta": {}, - "lines": 88 + "lines": 103 }, { - "file": "rebase.h", + "file": "git2/rebase.h", "functions": [ - "git_rebase_init_options", + "git_rebase_options_init", "git_rebase_init", "git_rebase_open", + "git_rebase_orig_head_name", + "git_rebase_orig_head_id", + "git_rebase_onto_name", + "git_rebase_onto_id", "git_rebase_operation_entrycount", "git_rebase_operation_current", "git_rebase_operation_byindex", @@ -663,10 +757,10 @@ "git_rebase_free" ], "meta": {}, - "lines": 316 + "lines": 397 }, { - "file": "refdb.h", + "file": "git2/refdb.h", 
"functions": [ "git_refdb_new", "git_refdb_open", @@ -674,10 +768,10 @@ "git_refdb_free" ], "meta": {}, - "lines": 63 + "lines": 66 }, { - "file": "reflog.h", + "file": "git2/reflog.h", "functions": [ "git_reflog_read", "git_reflog_write", @@ -697,7 +791,7 @@ "lines": 166 }, { - "file": "refs.h", + "file": "git2/refs.h", "functions": [ "git_reference_lookup", "git_reference_name_to_id", @@ -719,6 +813,8 @@ "git_reference_delete", "git_reference_remove", "git_reference_list", + "git_reference_foreach_cb", + "git_reference_foreach_name_cb", "git_reference_foreach", "git_reference_foreach_name", "git_reference_dup", @@ -738,32 +834,37 @@ "git_reference_is_note", "git_reference_normalize_name", "git_reference_peel", - "git_reference_is_valid_name", + "git_reference_name_is_valid", "git_reference_shorthand" ], "meta": {}, - "lines": 744 + "lines": 769 }, { - "file": "refspec.h", + "file": "git2/refspec.h", "functions": [ + "git_refspec_parse", + "git_refspec_free", "git_refspec_src", "git_refspec_dst", "git_refspec_string", "git_refspec_force", "git_refspec_direction", + "git_refspec_src_matches_negative", "git_refspec_src_matches", "git_refspec_dst_matches", "git_refspec_transform", "git_refspec_rtransform" ], "meta": {}, - "lines": 100 + "lines": 126 }, { - "file": "remote.h", + "file": "git2/remote.h", "functions": [ "git_remote_create", + "git_remote_create_options_init", + "git_remote_create_with_opts", "git_remote_create_with_fetchspec", "git_remote_create_anonymous", "git_remote_create_detached", @@ -775,25 +876,31 @@ "git_remote_pushurl", "git_remote_set_url", "git_remote_set_pushurl", + "git_remote_set_instance_url", + "git_remote_set_instance_pushurl", "git_remote_add_fetch", "git_remote_get_fetch_refspecs", "git_remote_add_push", "git_remote_get_push_refspecs", "git_remote_refspec_count", "git_remote_get_refspec", - "git_remote_connect", "git_remote_ls", "git_remote_connected", "git_remote_stop", "git_remote_disconnect", "git_remote_free", "git_remote_list", 
- "git_push_transfer_progress", + "git_push_transfer_progress_cb", "git_push_negotiation", "git_push_update_reference_cb", + "git_url_resolve_cb", + "git_remote_ready_cb", "git_remote_init_callbacks", - "git_fetch_init_options", - "git_push_init_options", + "git_fetch_options_init", + "git_push_options_init", + "git_remote_connect_options_init", + "git_remote_connect", + "git_remote_connect_ext", "git_remote_download", "git_remote_upload", "git_remote_update_tips", @@ -805,15 +912,15 @@ "git_remote_set_autotag", "git_remote_prune_refs", "git_remote_rename", - "git_remote_is_valid_name", + "git_remote_name_is_valid", "git_remote_delete", "git_remote_default_branch" ], "meta": {}, - "lines": 850 + "lines": 1244 }, { - "file": "repository.h", + "file": "git2/repository.h", "functions": [ "git_repository_open", "git_repository_open_from_worktree", @@ -823,11 +930,12 @@ "git_repository_open_bare", "git_repository_free", "git_repository_init", - "git_repository_init_init_options", + "git_repository_init_options_init", "git_repository_init_ext", "git_repository_head", "git_repository_head_for_worktree", "git_repository_head_detached", + "git_repository_head_detached_for_worktree", "git_repository_head_unborn", "git_repository_is_empty", "git_repository_item_path", @@ -845,7 +953,9 @@ "git_repository_message", "git_repository_message_remove", "git_repository_state_cleanup", + "git_repository_fetchhead_foreach_cb", "git_repository_fetchhead_foreach", + "git_repository_mergehead_foreach_cb", "git_repository_mergehead_foreach", "git_repository_hashfile", "git_repository_set_head", @@ -857,43 +967,41 @@ "git_repository_get_namespace", "git_repository_is_shallow", "git_repository_ident", - "git_repository_set_ident" + "git_repository_set_ident", + "git_repository_oid_type", + "git_repository_commit_parents" ], "meta": {}, - "lines": 862 + "lines": 1014 }, { - "file": "reset.h", + "file": "git2/reset.h", "functions": [ "git_reset", "git_reset_from_annotated", "git_reset_default" 
], "meta": {}, - "lines": 107 + "lines": 119 }, { - "file": "revert.h", + "file": "git2/revert.h", "functions": [ - "git_revert_init_options", + "git_revert_options_init", "git_revert_commit", "git_revert" ], "meta": {}, - "lines": 84 + "lines": 91 }, { - "file": "revparse.h", - "functions": [ - "git_revparse_single", - "git_revparse_ext", - "git_revparse" - ], + "file": "git2/revparse.h", + "functions": ["git_revparse_single", "git_revparse_ext", "git_revparse"], "meta": {}, "lines": 108 }, { - "file": "revwalk.h", + "file": "git2/revwalk.h", "functions": [ "git_revwalk_new", "git_revwalk_reset", @@ -915,26 +1023,30 @@ "git_revwalk_add_hide_cb" ], "meta": {}, - "lines": 291 + "lines": 298 }, { - "file": "signature.h", + "file": "git2/signature.h", "functions": [ "git_signature_new", "git_signature_now", + "git_signature_default_from_env", "git_signature_default", "git_signature_from_buffer", "git_signature_dup", "git_signature_free" ], "meta": {}, - "lines": 99 + "lines": 143 }, { - "file": "stash.h", + "file": "git2/stash.h", "functions": [ + "git_stash_save", + "git_stash_save_options_init", + "git_stash_save_with_opts", "git_stash_apply_progress_cb", - "git_stash_apply_init_options", + "git_stash_apply_options_init", "git_stash_apply", "git_stash_cb", "git_stash_foreach", @@ -942,13 +1054,13 @@ "git_stash_pop" ], "meta": {}, - "lines": 253 + "lines": 323 }, { - "file": "status.h", + "file": "git2/status.h", "functions": [ "git_status_cb", - "git_status_init_options", + "git_status_options_init", "git_status_foreach", "git_status_foreach_ext", "git_status_file", @@ -959,27 +1071,26 @@ "git_status_should_ignore" ], "meta": {}, - "lines": 370 + "lines": 451 }, { - "file": "strarray.h", - "functions": [ - "git_strarray_free", - "git_strarray_copy" - ], + "file": "git2/strarray.h", + "functions": ["git_strarray_dispose"], "meta": {}, - "lines": 53 + "lines": 37 }, { - "file": "submodule.h", + "file": "git2/submodule.h", "functions": [ "git_submodule_cb", - 
"git_submodule_update_init_options", + "git_submodule_update_options_init", "git_submodule_update", "git_submodule_lookup", + "git_submodule_dup", "git_submodule_free", "git_submodule_foreach", "git_submodule_add_setup", + "git_submodule_clone", "git_submodule_add_finalize", "git_submodule_add_to_index", "git_submodule_owner", @@ -1008,184 +1119,28 @@ "git_submodule_location" ], "meta": {}, - "lines": 632 - }, - { - "file": "sys/commit.h", - "functions": [ - "git_commit_create_from_ids", - "git_commit_create_from_callback" - ], - "meta": {}, - "lines": 76 - }, - { - "file": "sys/config.h", - "functions": [ - "git_config_init_backend", - "git_config_add_backend" - ], - "meta": {}, - "lines": 126 + "lines": 674 }, { - "file": "sys/diff.h", - "functions": [ - "git_diff_print_callback__to_buf", - "git_diff_print_callback__to_file_handle", - "git_diff_get_perfdata", - "git_status_list_get_perfdata" - ], + "file": "git2/sys/commit_graph.h", + "functions": [], "meta": {}, - "lines": 90 + "lines": 99 }, + { "file": "git2/sys/config.h", "functions": [], "meta": {}, "lines": 162 }, + { "file": "git2/sys/filter.h", "functions": [], "meta": {}, "lines": 109 }, + { "file": "git2/sys/hashsig.h", "functions": [], "meta": {}, "lines": 55 }, + { "file": "git2/sys/merge.h", "functions": [], "meta": {}, "lines": 49 }, + { "file": "git2/sys/path.h", "functions": [], "meta": {}, "lines": 51 }, + { "file": "git2/sys/stream.h", "functions": [], "meta": {}, "lines": 105 }, { - "file": "sys/filter.h", - "functions": [ - "git_filter_lookup", - "git_filter_list_new", - "git_filter_list_push", - "git_filter_list_length", - "git_filter_source_repo", - "git_filter_source_path", - "git_filter_source_filemode", - "git_filter_source_id", - "git_filter_source_mode", - "git_filter_source_flags", - "git_filter_init_fn", - "git_filter_shutdown_fn", - "git_filter_check_fn", - "git_filter_apply_fn", - "git_filter_cleanup_fn", - "git_filter_init", - "git_filter_register", - "git_filter_unregister" - ], + 
"file": "git2/sys/transport.h", + "functions": [], "meta": {}, "lines": 328 }, { - "file": "sys/hashsig.h", - "functions": [ - "git_hashsig_create", - "git_hashsig_create_fromfile", - "git_hashsig_free", - "git_hashsig_compare" - ], - "meta": {}, - "lines": 102 - }, - { - "file": "sys/mempack.h", - "functions": [ - "git_mempack_new", - "git_mempack_dump", - "git_mempack_reset" - ], - "meta": {}, - "lines": 82 - }, - { - "file": "sys/merge.h", - "functions": [ - "git_merge_driver_init_fn", - "git_merge_driver_shutdown_fn", - "git_merge_driver_apply_fn" - ], - "meta": {}, - "lines": 135 - }, - { - "file": "sys/odb_backend.h", - "functions": [ - "git_odb_init_backend" - ], - "meta": {}, - "lines": 118 - }, - { - "file": "sys/openssl.h", - "functions": [ - "git_openssl_set_locking" - ], - "meta": {}, - "lines": 34 - }, - { - "file": "sys/refdb_backend.h", - "functions": [ - "git_refdb_init_backend", - "git_refdb_backend_fs", - "git_refdb_set_backend" - ], - "meta": {}, - "lines": 214 - }, - { - "file": "sys/refs.h", - "functions": [ - "git_reference__alloc", - "git_reference__alloc_symbolic" - ], - "meta": {}, - "lines": 45 - }, - { - "file": "sys/repository.h", - "functions": [ - "git_repository_new", - "git_repository__cleanup", - "git_repository_reinit_filesystem", - "git_repository_set_config", - "git_repository_set_odb", - "git_repository_set_refdb", - "git_repository_set_index", - "git_repository_set_bare", - "git_repository_submodule_cache_all", - "git_repository_submodule_cache_clear" - ], - "meta": {}, - "lines": 165 - }, - { - "file": "sys/stream.h", - "functions": [ - "git_stream_register_tls" - ], - "meta": {}, - "lines": 54 - }, - { - "file": "sys/time.h", - "functions": [ - "git_time_monotonic" - ], - "meta": {}, - "lines": 27 - }, - { - "file": "sys/transport.h", - "functions": [ - "git_transport_init", - "git_transport_new", - "git_transport_ssh_with_paths", - "git_transport_register", - "git_transport_unregister", - "git_transport_dummy", - 
"git_transport_local", - "git_transport_smart", - "git_transport_smart_certificate_check", - "git_transport_smart_credentials", - "git_transport_smart_proxy_options", - "git_smart_subtransport_http", - "git_smart_subtransport_git", - "git_smart_subtransport_ssh" - ], - "meta": {}, - "lines": 389 - }, - { - "file": "tag.h", + "file": "git2/tag.h", "functions": [ "git_tag_lookup", "git_tag_lookup_prefix", @@ -1200,48 +1155,49 @@ "git_tag_message", "git_tag_create", "git_tag_annotation_create", - "git_tag_create_frombuffer", + "git_tag_create_from_buffer", "git_tag_create_lightweight", "git_tag_delete", "git_tag_list", "git_tag_list_match", + "git_tag_foreach_cb", "git_tag_foreach", "git_tag_peel", - "git_tag_dup" + "git_tag_dup", + "git_tag_name_is_valid" ], "meta": {}, - "lines": 357 + "lines": 380 }, { - "file": "trace.h", - "functions": [ - "git_trace_callback", - "git_trace_set" - ], + "file": "git2/trace.h", + "functions": ["git_trace_cb", "git_trace_set"], "meta": {}, - "lines": 63 + "lines": 68 }, { - "file": "transport.h", + "file": "git2/transaction.h", "functions": [ - "git_transport_cb", - "git_cred_has_username", - "git_cred_userpass_plaintext_new", - "git_cred_ssh_key_new", - "git_cred_ssh_interactive_new", - "git_cred_ssh_key_from_agent", - "git_cred_ssh_custom_new", - "git_cred_default_new", - "git_cred_username_new", - "git_cred_ssh_key_memory_new", - "git_cred_free", - "git_cred_acquire_cb" + "git_transaction_new", + "git_transaction_lock_ref", + "git_transaction_set_target", + "git_transaction_set_symbolic_target", + "git_transaction_set_reflog", + "git_transaction_remove", + "git_transaction_commit", + "git_transaction_free" ], "meta": {}, - "lines": 338 + "lines": 117 + }, + { + "file": "git2/transport.h", + "functions": ["git_transport_message_cb", "git_transport_cb"], + "meta": {}, + "lines": 45 }, { - "file": "tree.h", + "file": "git2/tree.h", "functions": [ "git_tree_lookup", "git_tree_lookup_prefix", @@ -1272,52 +1228,44 @@ 
"git_treebuilder_filter_cb", "git_treebuilder_filter", "git_treebuilder_write", - "git_treebuilder_write_with_buffer", "git_treewalk_cb", "git_tree_walk", "git_tree_dup", "git_tree_create_updated" ], "meta": {}, - "lines": 479 - }, - { - "file": "types.h", - "functions": [ - "git_transfer_progress_cb", - "git_transport_message_cb", - "git_transport_certificate_check_cb" - ], - "meta": {}, - "lines": 429 + "lines": 481 }, + { "file": "git2/types.h", "functions": [], "meta": {}, "lines": 382 }, { - "file": "worktree.h", + "file": "git2/worktree.h", "functions": [ "git_worktree_list", "git_worktree_lookup", "git_worktree_open_from_repository", "git_worktree_free", "git_worktree_validate", - "git_worktree_add_init_options", + "git_worktree_add_options_init", "git_worktree_add", "git_worktree_lock", "git_worktree_unlock", "git_worktree_is_locked", - "git_worktree_prune_init_options", + "git_worktree_name", + "git_worktree_path", + "git_worktree_prune_options_init", "git_worktree_is_prunable", "git_worktree_prune" ], "meta": {}, - "lines": 216 + "lines": 273 } ], "functions": { "git_annotated_commit_from_ref": { "type": "function", - "file": "annotated_commit.h", - "line": 33, - "lineto": 36, + "file": "git2/annotated_commit.h", + "line": 40, + "lineto": 43, "args": [ { "name": "out", @@ -1337,24 +1285,21 @@ ], "argline": "git_annotated_commit **out, git_repository *repo, const git_reference *ref", "sig": "git_annotated_commit **::git_repository *::const git_reference *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Creates a git_annotated_commit from the given reference.\n The resulting git_annotated_commit must be freed with\n git_annotated_commit_free.

\n", "comments": "", "group": "annotated", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_annotated_commit_from_ref-1" + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_annotated_commit_from_ref-1" ] } }, "git_annotated_commit_from_fetchhead": { "type": "function", - "file": "annotated_commit.h", - "line": 50, - "lineto": 55, + "file": "git2/annotated_commit.h", + "line": 57, + "lineto": 62, "args": [ { "name": "out", @@ -1384,19 +1329,16 @@ ], "argline": "git_annotated_commit **out, git_repository *repo, const char *branch_name, const char *remote_url, const git_oid *id", "sig": "git_annotated_commit **::git_repository *::const char *::const char *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Creates a git_annotated_commit from the given fetch head data.\n The resulting git_annotated_commit must be freed with\n git_annotated_commit_free.

\n", "comments": "", "group": "annotated" }, "git_annotated_commit_lookup": { "type": "function", - "file": "annotated_commit.h", - "line": 75, - "lineto": 78, + "file": "git2/annotated_commit.h", + "line": 82, + "lineto": 85, "args": [ { "name": "out", @@ -1416,24 +1358,16 @@ ], "argline": "git_annotated_commit **out, git_repository *repo, const git_oid *id", "sig": "git_annotated_commit **::git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Creates a git_annotated_commit from the given commit id.\n The resulting git_annotated_commit must be freed with\n git_annotated_commit_free.

\n", "comments": "

An annotated commit contains information about how it was looked up, which may be useful for functions like merge or rebase to provide context to the operation. For example, conflict files will include the name of the source or target branches being merged. It is therefore preferable to use the most specific function (eg git_annotated_commit_from_ref) instead of this one when that data is known.

\n", - "group": "annotated", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_annotated_commit_lookup-2" - ] - } + "group": "annotated" }, "git_annotated_commit_from_revspec": { "type": "function", - "file": "annotated_commit.h", - "line": 92, - "lineto": 95, + "file": "git2/annotated_commit.h", + "line": 99, + "lineto": 102, "args": [ { "name": "out", @@ -1453,19 +1387,16 @@ ], "argline": "git_annotated_commit **out, git_repository *repo, const char *revspec", "sig": "git_annotated_commit **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Creates a git_annotated_comit from a revision string.

\n", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Creates a git_annotated_commit from a revision string.

\n", "comments": "

See man gitrevisions, or http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for information on the syntax accepted.

\n", "group": "annotated" }, "git_annotated_commit_id": { "type": "function", - "file": "annotated_commit.h", - "line": 103, - "lineto": 104, + "file": "git2/annotated_commit.h", + "line": 110, + "lineto": 111, "args": [ { "name": "commit", @@ -1475,26 +1406,50 @@ ], "argline": "const git_annotated_commit *commit", "sig": "const git_annotated_commit *", - "return": { - "type": "const git_oid *", - "comment": " commit id" - }, + "return": { "type": "const git_oid *", "comment": " commit id" }, "description": "

Gets the commit ID that the given git_annotated_commit refers to.

\n", "comments": "", "group": "annotated", "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_annotated_commit_id-2"], "merge.c": [ - "ex/HEAD/merge.html#git_annotated_commit_id-3", - "ex/HEAD/merge.html#git_annotated_commit_id-4", - "ex/HEAD/merge.html#git_annotated_commit_id-5" + "ex/v1.9.1/merge.html#git_annotated_commit_id-1", + "ex/v1.9.1/merge.html#git_annotated_commit_id-2", + "ex/v1.9.1/merge.html#git_annotated_commit_id-3" + ] + } + }, + "git_annotated_commit_ref": { + "type": "function", + "file": "git2/annotated_commit.h", + "line": 119, + "lineto": 120, + "args": [ + { + "name": "commit", + "type": "const git_annotated_commit *", + "comment": "the given annotated commit" + } + ], + "argline": "const git_annotated_commit *commit", + "sig": "const git_annotated_commit *", + "return": { "type": "const char *", "comment": " ref name." }, + "description": "

Get the refname that the given git_annotated_commit refers to.

\n", + "comments": "", + "group": "annotated", + "examples": { + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_annotated_commit_ref-3", + "ex/v1.9.1/checkout.html#git_annotated_commit_ref-4", + "ex/v1.9.1/checkout.html#git_annotated_commit_ref-5" ] } }, "git_annotated_commit_free": { "type": "function", - "file": "annotated_commit.h", - "line": 111, - "lineto": 112, + "file": "git2/annotated_commit.h", + "line": 127, + "lineto": 128, "args": [ { "name": "commit", @@ -1504,30 +1459,123 @@ ], "argline": "git_annotated_commit *commit", "sig": "git_annotated_commit *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Frees a git_annotated_commit.

\n", "comments": "", - "group": "annotated" + "group": "annotated", + "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_annotated_commit_free-6"] + } }, - "git_attr_value": { + "git_apply_options_init": { "type": "function", - "file": "attr.h", - "line": 102, - "lineto": 102, + "file": "git2/apply.h", + "line": 127, + "lineto": 127, "args": [ { - "name": "attr", - "type": "const char *", - "comment": "The attribute" - } + "name": "opts", + "type": "git_apply_options *", + "comment": "The `git_apply_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_APPLY_OPTIONS_VERSION`" + } + ], + "argline": "git_apply_options *opts, unsigned int version", + "sig": "git_apply_options *::unsigned int", + "return": { "type": "int", "comment": " 0 on success or -1 on failure." }, + "description": "

Initialize git_apply_options structure

\n", + "comments": "

Initialize a git_apply_options with default values. Equivalent to creating an instance with GIT_APPLY_OPTIONS_INIT.

\n", + "group": "apply" + }, + "git_apply_to_tree": { + "type": "function", + "file": "git2/apply.h", + "line": 140, + "lineto": 145, + "args": [ + { + "name": "out", + "type": "git_index **", + "comment": "the postimage of the application" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository to apply" + }, + { + "name": "preimage", + "type": "git_tree *", + "comment": "the tree to apply the diff to" + }, + { + "name": "diff", + "type": "git_diff *", + "comment": "the diff to apply" + }, + { + "name": "options", + "type": "const git_apply_options *", + "comment": "the options for the apply (or null for defaults)" + } + ], + "argline": "git_index **out, git_repository *repo, git_tree *preimage, git_diff *diff, const git_apply_options *options", + "sig": "git_index **::git_repository *::git_tree *::git_diff *::const git_apply_options *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Apply a git_diff to a git_tree, and return the resulting image\n as an index.

\n", + "comments": "", + "group": "apply" + }, + "git_apply": { + "type": "function", + "file": "git2/apply.h", + "line": 178, + "lineto": 182, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository to apply to" + }, + { + "name": "diff", + "type": "git_diff *", + "comment": "the diff to apply" + }, + { + "name": "location", + "type": "git_apply_location_t", + "comment": "the location to apply (workdir, index or both)" + }, + { + "name": "options", + "type": "const git_apply_options *", + "comment": "the options for the apply (or null for defaults)" + } + ], + "argline": "git_repository *repo, git_diff *diff, git_apply_location_t location, const git_apply_options *options", + "sig": "git_repository *::git_diff *::git_apply_location_t::const git_apply_options *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Apply a git_diff to the given repository, making changes directly\n in the working directory, the index, or both.

\n", + "comments": "", + "group": "apply" + }, + "git_attr_value": { + "type": "function", + "file": "git2/attr.h", + "line": 106, + "lineto": 106, + "args": [ + { "name": "attr", "type": "const char *", "comment": "The attribute" } ], "argline": "const char *attr", "sig": "const char *", "return": { - "type": "git_attr_t", + "type": "git_attr_value_t", "comment": " the value type for the attribute" }, "description": "

Return the value type for a given attribute.

\n", @@ -1536,9 +1584,9 @@ }, "git_attr_get": { "type": "function", - "file": "attr.h", - "line": 145, - "lineto": 150, + "file": "git2/attr.h", + "line": 195, + "lineto": 200, "args": [ { "name": "value_out", @@ -1568,19 +1616,55 @@ ], "argline": "const char **value_out, git_repository *repo, uint32_t flags, const char *path, const char *name", "sig": "const char **::git_repository *::uint32_t::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Look up the value of one git attribute for path.

\n", "comments": "", "group": "attr" }, + "git_attr_get_ext": { + "type": "function", + "file": "git2/attr.h", + "line": 218, + "lineto": 223, + "args": [ + { + "name": "value_out", + "type": "const char **", + "comment": "Output of the value of the attribute. Use the GIT_ATTR_...\n macros to test for TRUE, FALSE, UNSPECIFIED, etc. or just\n use the string value for attributes set to a value. You\n should NOT modify or free this value." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the path." + }, + { + "name": "opts", + "type": "git_attr_options *", + "comment": "The `git_attr_options` to use when querying these attributes." + }, + { + "name": "path", + "type": "const char *", + "comment": "The path to check for attributes. Relative paths are\n interpreted relative to the repo root. The file does\n not have to exist, but if it does not, then it will be\n treated as a plain file (not a directory)." + }, + { + "name": "name", + "type": "const char *", + "comment": "The name of the attribute to look up." + } + ], + "argline": "const char **value_out, git_repository *repo, git_attr_options *opts, const char *path, const char *name", + "sig": "const char **::git_repository *::git_attr_options *::const char *::const char *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Look up the value of one git attribute for path with extended options.

\n", + "comments": "", + "group": "attr" + }, "git_attr_get_many": { "type": "function", - "file": "attr.h", - "line": 181, - "lineto": 187, + "file": "git2/attr.h", + "line": 255, + "lineto": 261, "args": [ { "name": "values_out", @@ -1615,19 +1699,60 @@ ], "argline": "const char **values_out, git_repository *repo, uint32_t flags, const char *path, size_t num_attr, const char **names", "sig": "const char **::git_repository *::uint32_t::const char *::size_t::const char **", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Look up a list of git attributes for path.

\n", "comments": "

Use this if you have a known list of attributes that you want to look up in a single call. This is somewhat more efficient than calling git_attr_get() multiple times.

\n\n

For example, you might write:

\n\n
 const char *attrs[] = { "crlf", "diff", "foo" };     const char **values[3];     git_attr_get_many(values, repo, 0, "my/fun/file.c", 3, attrs);\n
\n\n

Then you could loop through the 3 values to get the settings for the three attributes you asked about.

\n", "group": "attr" }, + "git_attr_get_many_ext": { + "type": "function", + "file": "git2/attr.h", + "line": 280, + "lineto": 286, + "args": [ + { + "name": "values_out", + "type": "const char **", + "comment": "An array of num_attr entries that will have string\n pointers written into it for the values of the attributes.\n You should not modify or free the values that are written\n into this array (although of course, you should free the\n array itself if you allocated it)." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the path." + }, + { + "name": "opts", + "type": "git_attr_options *", + "comment": "The `git_attr_options` to use when querying these attributes." + }, + { + "name": "path", + "type": "const char *", + "comment": "The path inside the repo to check attributes. This\n does not have to exist, but if it does not, then\n it will be treated as a plain file (i.e. not a directory)." + }, + { + "name": "num_attr", + "type": "size_t", + "comment": "The number of attributes being looked up" + }, + { + "name": "names", + "type": "const char **", + "comment": "An array of num_attr strings containing attribute names." + } + ], + "argline": "const char **values_out, git_repository *repo, git_attr_options *opts, const char *path, size_t num_attr, const char **names", + "sig": "const char **::git_repository *::git_attr_options *::const char *::size_t::const char **", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Look up a list of git attributes for path with extended options.

\n", + "comments": "", + "group": "attr" + }, "git_attr_foreach": { "type": "function", - "file": "attr.h", - "line": 209, - "lineto": 214, + "file": "git2/attr.h", + "line": 319, + "lineto": 324, "args": [ { "name": "repo", @@ -1647,7 +1772,7 @@ { "name": "callback", "type": "git_attr_foreach_cb", - "comment": "Function to invoke on each attribute name and value. The\n value may be NULL is the attribute is explicitly set to\n UNSPECIFIED using the '!' sign. Callback will be invoked\n only once per attribute name, even if there are multiple\n rules for a given file. The highest priority rule will be\n used. Return a non-zero value from this to stop looping.\n The value will be returned from `git_attr_foreach`." + "comment": "Function to invoke on each attribute name and value.\n See git_attr_foreach_cb." }, { "name": "payload", @@ -1665,75 +1790,111 @@ "comments": "", "group": "attr" }, + "git_attr_foreach_ext": { + "type": "function", + "file": "git2/attr.h", + "line": 339, + "lineto": 344, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the path." + }, + { + "name": "opts", + "type": "git_attr_options *", + "comment": "The `git_attr_options` to use when querying these attributes." + }, + { + "name": "path", + "type": "const char *", + "comment": "Path inside the repo to check attributes. This does not have\n to exist, but if it does not, then it will be treated as a\n plain file (i.e. not a directory)." + }, + { + "name": "callback", + "type": "git_attr_foreach_cb", + "comment": "Function to invoke on each attribute name and value.\n See git_attr_foreach_cb." + }, + { + "name": "payload", + "type": "void *", + "comment": "Passed on as extra parameter to callback function." 
+ } + ], + "argline": "git_repository *repo, git_attr_options *opts, const char *path, git_attr_foreach_cb callback, void *payload", + "sig": "git_repository *::git_attr_options *::const char *::git_attr_foreach_cb::void *", + "return": { + "type": "int", + "comment": " 0 on success, non-zero callback return value, or error code" + }, + "description": "

Loop over all the git attributes for a path with extended options.

\n", + "comments": "", + "group": "attr" + }, "git_attr_cache_flush": { "type": "function", - "file": "attr.h", - "line": 224, - "lineto": 225, + "file": "git2/attr.h", + "line": 357, + "lineto": 358, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "The repository containing the gitattributes cache" } ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Flush the gitattributes cache.

\n", "comments": "

Call this if you have reason to believe that the attributes files on disk no longer match the cached contents of memory. This will cause the attributes files to be reloaded the next time that an attribute access function is called.

\n", "group": "attr" }, "git_attr_add_macro": { "type": "function", - "file": "attr.h", - "line": 237, - "lineto": 240, + "file": "git2/attr.h", + "line": 375, + "lineto": 378, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "The repository to add the macro in." }, { "name": "name", "type": "const char *", - "comment": null + "comment": "The name of the macro." }, { "name": "values", "type": "const char *", - "comment": null + "comment": "The value for the macro." } ], "argline": "git_repository *repo, const char *name, const char *values", "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Add a macro definition.

\n", - "comments": "

Macros will automatically be loaded from the top level .gitattributes file of the repository (plus the build-in "binary" macro). This function allows you to add others. For example, to add the default macro, you would call:

\n\n
 git_attr_add_macro(repo, "binary", "-diff -crlf");\n
\n", + "comments": "

Macros will automatically be loaded from the top level .gitattributes file of the repository (plus the built-in "binary" macro). This function allows you to add others. For example, to add the default macro, you would call:

\n\n
 git_attr_add_macro(repo, "binary", "-diff -crlf");\n
\n", "group": "attr" }, - "git_blame_init_options": { + "git_blame_options_init": { "type": "function", - "file": "blame.h", - "line": 92, - "lineto": 94, + "file": "git2/blame.h", + "line": 146, + "lineto": 148, "args": [ { "name": "opts", "type": "git_blame_options *", - "comment": "The `git_blame_options` struct to initialize" + "comment": "The `git_blame_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_BLAME_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_BLAME_OPTIONS_VERSION`." } ], "argline": "git_blame_options *opts, unsigned int version", @@ -1742,37 +1903,53 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_blame_options with default values. Equivalent to\n creating an instance with GIT_BLAME_OPTIONS_INIT.

\n", + "description": "

Initialize git_blame_options structure

\n", + "comments": "

Initializes a git_blame_options with default values. Equivalent to creating an instance with GIT_BLAME_OPTIONS_INIT.

\n", + "group": "blame" + }, + "git_blame_linecount": { + "type": "function", + "file": "git2/blame.h", + "line": 244, + "lineto": 244, + "args": [ + { + "name": "blame", + "type": "git_blame *", + "comment": "The blame structure to query." + } + ], + "argline": "git_blame *blame", + "sig": "git_blame *", + "return": { "type": "size_t", "comment": " The number of line." }, + "description": "

Gets the number of lines that exist in the blame structure.

\n", "comments": "", "group": "blame" }, - "git_blame_get_hunk_count": { + "git_blame_hunkcount": { "type": "function", - "file": "blame.h", - "line": 137, - "lineto": 137, + "file": "git2/blame.h", + "line": 252, + "lineto": 252, "args": [ { "name": "blame", "type": "git_blame *", - "comment": null + "comment": "The blame structure to query." } ], "argline": "git_blame *blame", "sig": "git_blame *", - "return": { - "type": "uint32_t", - "comment": null - }, + "return": { "type": "size_t", "comment": " The number of hunks." }, "description": "

Gets the number of hunks that exist in the blame structure.

\n", "comments": "", "group": "blame" }, - "git_blame_get_hunk_byindex": { + "git_blame_hunk_byindex": { "type": "function", - "file": "blame.h", - "line": 146, - "lineto": 148, + "file": "git2/blame.h", + "line": 261, + "lineto": 263, "args": [ { "name": "blame", @@ -1781,12 +1958,12 @@ }, { "name": "index", - "type": "uint32_t", + "type": "size_t", "comment": "index of the hunk to retrieve" } ], - "argline": "git_blame *blame, uint32_t index", - "sig": "git_blame *::uint32_t", + "argline": "git_blame *blame, size_t index", + "sig": "git_blame *::size_t", "return": { "type": "const git_blame_hunk *", "comment": " the hunk at the given index, or NULL on error" @@ -1795,11 +1972,11 @@ "comments": "", "group": "blame" }, - "git_blame_get_hunk_byline": { + "git_blame_hunk_byline": { "type": "function", - "file": "blame.h", - "line": 157, - "lineto": 159, + "file": "git2/blame.h", + "line": 273, + "lineto": 275, "args": [ { "name": "blame", @@ -1818,62 +1995,118 @@ "type": "const git_blame_hunk *", "comment": " the hunk that contains the given line, or NULL on error" }, - "description": "

Gets the hunk that relates to the given line number in the newest commit.

\n", + "description": "

Gets the hunk that relates to the given line number in the newest\n commit.

\n", "comments": "", "group": "blame", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blame_get_hunk_byline-1" - ] + "blame.c": ["ex/v1.9.1/blame.html#git_blame_hunk_byline-1"] } }, - "git_blame_file": { + "git_blame_line_byindex": { "type": "function", - "file": "blame.h", - "line": 172, - "lineto": 176, + "file": "git2/blame.h", + "line": 284, + "lineto": 286, "args": [ { - "name": "out", - "type": "git_blame **", - "comment": "pointer that will receive the blame object" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "repository whose history is to be walked" - }, - { - "name": "path", - "type": "const char *", - "comment": "path to file to consider" + "name": "blame", + "type": "git_blame *", + "comment": "the blame structure to query" }, { - "name": "options", - "type": "git_blame_options *", - "comment": "options for the blame operation. If NULL, this is treated as\n though GIT_BLAME_OPTIONS_INIT were passed." + "name": "idx", + "type": "size_t", + "comment": "the (1-based) line number" } ], - "argline": "git_blame **out, git_repository *repo, const char *path, git_blame_options *options", - "sig": "git_blame **::git_repository *::const char *::git_blame_options *", + "argline": "git_blame *blame, size_t idx", + "sig": "git_blame *::size_t", "return": { - "type": "int", - "comment": " 0 on success, or an error code. (use giterr_last for information\n about the error.)" + "type": "const git_blame_line *", + "comment": " the blamed line, or NULL on error" }, - "description": "

Get the blame for a single file.

\n", + "description": "

Gets the information about the line in the blame.

\n", "comments": "", - "group": "blame", - "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blame_file-2" - ] - } + "group": "blame" }, - "git_blame_buffer": { + "git_blame_get_hunk_count": { "type": "function", - "file": "blame.h", - "line": 196, - "lineto": 200, + "file": "git2/blame.h", + "line": 296, + "lineto": 296, + "args": [ + { + "name": "blame", + "type": "git_blame *", + "comment": "The blame structure to query." + } + ], + "argline": "git_blame *blame", + "sig": "git_blame *", + "return": { "type": "uint32_t", "comment": " The number of hunks." }, + "description": "

Gets the number of hunks that exist in the blame structure.

\n", + "comments": "", + "group": "blame" + }, + "git_blame_get_hunk_byindex": { + "type": "function", + "file": "git2/blame.h", + "line": 305, + "lineto": 307, + "args": [ + { + "name": "blame", + "type": "git_blame *", + "comment": "the blame structure to query" + }, + { + "name": "index", + "type": "uint32_t", + "comment": "index of the hunk to retrieve" + } + ], + "argline": "git_blame *blame, uint32_t index", + "sig": "git_blame *::uint32_t", + "return": { + "type": "const git_blame_hunk *", + "comment": " the hunk at the given index, or NULL on error" + }, + "description": "

Gets the blame hunk at the given index.

\n", + "comments": "", + "group": "blame" + }, + "git_blame_get_hunk_byline": { + "type": "function", + "file": "git2/blame.h", + "line": 316, + "lineto": 318, + "args": [ + { + "name": "blame", + "type": "git_blame *", + "comment": "the blame structure to query" + }, + { + "name": "lineno", + "type": "size_t", + "comment": "the (1-based) line number to find a hunk for" + } + ], + "argline": "git_blame *blame, size_t lineno", + "sig": "git_blame *::size_t", + "return": { + "type": "const git_blame_hunk *", + "comment": " the hunk that contains the given line, or NULL on error" + }, + "description": "

Gets the hunk that relates to the given line number in the newest commit.

\n", + "comments": "", + "group": "blame" + }, + "git_blame_buffer": { + "type": "function", + "file": "git2/blame.h", + "line": 374, + "lineto": 378, "args": [ { "name": "out", @@ -1881,7 +2114,7 @@ "comment": "pointer that will receive the resulting blame data" }, { - "name": "reference", + "name": "base", "type": "git_blame *", "comment": "cached blame from the history of the file (usually the output\n from git_blame_file)" }, @@ -1896,21 +2129,21 @@ "comment": "number of valid bytes in the buffer" } ], - "argline": "git_blame **out, git_blame *reference, const char *buffer, size_t buffer_len", + "argline": "git_blame **out, git_blame *base, const char *buffer, size_t buffer_len", "sig": "git_blame **::git_blame *::const char *::size_t", "return": { "type": "int", - "comment": " 0 on success, or an error code. (use giterr_last for information\n about the error)" + "comment": " 0 on success, or an error code. (use git_error_last for information\n about the error)" }, - "description": "

Get blame data for a file that has been modified in memory. The reference\n parameter is a pre-calculated blame for the in-odb history of the file. This\n means that once a file blame is completed (which can be expensive), updating\n the buffer blame is very fast.

\n", + "description": "

Get blame data for a file that has been modified in memory. The blame\n parameter is a pre-calculated blame for the in-odb history of the file.\n This means that once a file blame is completed (which can be expensive),\n updating the buffer blame is very fast.

\n", "comments": "

Lines that differ between the buffer and the committed version are marked as having a zero OID for their final_commit_id.

\n", "group": "blame" }, "git_blame_free": { "type": "function", - "file": "blame.h", - "line": 207, - "lineto": 207, + "file": "git2/blame.h", + "line": 385, + "lineto": 385, "args": [ { "name": "blame", @@ -1920,24 +2153,17 @@ ], "argline": "git_blame *blame", "sig": "git_blame *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free memory allocated by git_blame_file or git_blame_buffer.

\n", "comments": "", "group": "blame", - "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blame_free-3" - ] - } + "examples": { "blame.c": ["ex/v1.9.1/blame.html#git_blame_free-2"] } }, "git_blob_lookup": { "type": "function", - "file": "blob.h", - "line": 33, - "lineto": 33, + "file": "git2/blob.h", + "line": 37, + "lineto": 40, "args": [ { "name": "blob", @@ -1957,27 +2183,20 @@ ], "argline": "git_blob **blob, git_repository *repo, const git_oid *id", "sig": "git_blob **::git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a blob object from a repository.

\n", "comments": "", "group": "blob", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blob_lookup-4" - ], - "general.c": [ - "ex/HEAD/general.html#git_blob_lookup-1" - ] + "blame.c": ["ex/v1.9.1/blame.html#git_blob_lookup-3"], + "general.c": ["ex/v1.9.1/general.html#git_blob_lookup-1"] } }, "git_blob_lookup_prefix": { "type": "function", - "file": "blob.h", - "line": 47, - "lineto": 47, + "file": "git2/blob.h", + "line": 54, + "lineto": 54, "args": [ { "name": "blob", @@ -2002,49 +2221,35 @@ ], "argline": "git_blob **blob, git_repository *repo, const git_oid *id, size_t len", "sig": "git_blob **::git_repository *::const git_oid *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a blob object from a repository,\n given a prefix of its identifier (short id).

\n", "comments": "", "group": "blob" }, "git_blob_free": { "type": "function", - "file": "blob.h", - "line": 60, - "lineto": 60, + "file": "git2/blob.h", + "line": 67, + "lineto": 67, "args": [ - { - "name": "blob", - "type": "git_blob *", - "comment": "the blob to close" - } + { "name": "blob", "type": "git_blob *", "comment": "the blob to close" } ], "argline": "git_blob *blob", "sig": "git_blob *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open blob

\n", "comments": "

This is a wrapper around git_object_free()

\n\n

IMPORTANT: It is necessary to call this method when you stop using a blob. Failure to do so will cause a memory leak.

\n", "group": "blob", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blob_free-5" - ], - "general.c": [ - "ex/HEAD/general.html#git_blob_free-2" - ] + "blame.c": ["ex/v1.9.1/blame.html#git_blob_free-4"], + "general.c": ["ex/v1.9.1/general.html#git_blob_free-2"] } }, "git_blob_id": { "type": "function", - "file": "blob.h", - "line": 68, - "lineto": 68, + "file": "git2/blob.h", + "line": 75, + "lineto": 75, "args": [ { "name": "blob", @@ -2064,9 +2269,9 @@ }, "git_blob_owner": { "type": "function", - "file": "blob.h", - "line": 76, - "lineto": 76, + "file": "git2/blob.h", + "line": 83, + "lineto": 83, "args": [ { "name": "blob", @@ -2086,9 +2291,9 @@ }, "git_blob_rawcontent": { "type": "function", - "file": "blob.h", - "line": 89, - "lineto": 89, + "file": "git2/blob.h", + "line": 96, + "lineto": 96, "args": [ { "name": "blob", @@ -2100,28 +2305,22 @@ "sig": "const git_blob *", "return": { "type": "const void *", - "comment": " the pointer" + "comment": " \n\n `unsigned char *` the pointer, or NULL on error" }, "description": "

Get a read-only buffer with the raw content of a blob.

\n", "comments": "

A pointer to the raw content of a blob is returned; this pointer is owned internally by the object and shall not be free'd. The pointer may be invalidated at a later time.

\n", "group": "blob", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blob_rawcontent-6" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_blob_rawcontent-1" - ], - "general.c": [ - "ex/HEAD/general.html#git_blob_rawcontent-3" - ] + "blame.c": ["ex/v1.9.1/blame.html#git_blob_rawcontent-5"], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_blob_rawcontent-1"], + "general.c": ["ex/v1.9.1/general.html#git_blob_rawcontent-3"] } }, "git_blob_rawsize": { "type": "function", - "file": "blob.h", - "line": 97, - "lineto": 97, + "file": "git2/blob.h", + "line": 104, + "lineto": 104, "args": [ { "name": "blob", @@ -2131,31 +2330,51 @@ ], "argline": "const git_blob *blob", "sig": "const git_blob *", - "return": { - "type": "git_off_t", - "comment": " size on bytes" - }, + "return": { "type": "git_object_size_t", "comment": " size in bytes" }, "description": "

Get the size in bytes of the contents of a blob

\n", "comments": "", "group": "blob", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_blob_rawsize-7" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_blob_rawsize-2" - ], + "blame.c": ["ex/v1.9.1/blame.html#git_blob_rawsize-6"], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_blob_rawsize-2"], "general.c": [ - "ex/HEAD/general.html#git_blob_rawsize-4", - "ex/HEAD/general.html#git_blob_rawsize-5" + "ex/v1.9.1/general.html#git_blob_rawsize-4", + "ex/v1.9.1/general.html#git_blob_rawsize-5" ] } }, - "git_blob_filtered_content": { + "git_blob_filter_options_init": { "type": "function", - "file": "blob.h", - "line": 122, - "lineto": 126, + "file": "git2/blob.h", + "line": 201, + "lineto": 203, + "args": [ + { + "name": "opts", + "type": "git_blob_filter_options *", + "comment": "The `git_blob_filter_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass GIT_BLOB_FILTER_OPTIONS_VERSION" + } + ], + "argline": "git_blob_filter_options *opts, unsigned int version", + "sig": "git_blob_filter_options *::unsigned int", + "return": { + "type": "int", + "comment": " Zero on success; -1 on failure." + }, + "description": "

Initialize git_blob_filter_options structure

\n", + "comments": "

Initializes a git_blob_filter_options with default values. Equivalent to creating an instance with GIT_BLOB_FILTER_OPTIONS_INIT.

\n", + "group": "blob" + }, + "git_blob_filter": { + "type": "function", + "file": "git2/blob.h", + "line": 227, + "lineto": 231, "args": [ { "name": "out", @@ -2173,26 +2392,26 @@ "comment": "Path used for file attribute lookups, etc." }, { - "name": "check_for_binary_data", - "type": "int", - "comment": "Should this test if blob content contains\n NUL bytes / looks like binary data before applying filters?" + "name": "opts", + "type": "git_blob_filter_options *", + "comment": "Options to use for filtering the blob" } ], - "argline": "git_buf *out, git_blob *blob, const char *as_path, int check_for_binary_data", - "sig": "git_buf *::git_blob *::const char *::int", + "argline": "git_buf *out, git_blob *blob, const char *as_path, git_blob_filter_options *opts", + "sig": "git_buf *::git_blob *::const char *::git_blob_filter_options *", "return": { "type": "int", - "comment": " 0 on success or an error code" + "comment": " \n\n[enum] git_error_code 0 on success or an error code" }, "description": "

Get a buffer with the filtered content of a blob.

\n", - "comments": "

This applies filters as if the blob was being checked out to the working directory under the specified filename. This may apply CRLF filtering or other types of changes depending on the file attributes set for the blob and the content detected in it.

\n\n

The output is written into a git_buf which the caller must free when done (via git_buf_free).

\n\n

If no filters need to be applied, then the out buffer will just be populated with a pointer to the raw content of the blob. In that case, be careful to not free the blob until done with the buffer or copy it into memory you own.

\n", + "comments": "

This applies filters as if the blob was being checked out to the working directory under the specified filename. This may apply CRLF filtering or other types of changes depending on the file attributes set for the blob and the content detected in it.

\n\n

The output is written into a git_buf which the caller must dispose when done (via git_buf_dispose).

\n\n

If no filters need to be applied, then the out buffer will just be populated with a pointer to the raw content of the blob. In that case, be careful to not free the blob until done with the buffer or copy it into memory you own.

\n", "group": "blob" }, - "git_blob_create_fromworkdir": { + "git_blob_create_from_workdir": { "type": "function", - "file": "blob.h", - "line": 139, - "lineto": 139, + "file": "git2/blob.h", + "line": 244, + "lineto": 244, "args": [ { "name": "id", @@ -2212,19 +2431,16 @@ ], "argline": "git_oid *id, git_repository *repo, const char *relative_path", "sig": "git_oid *::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Read a file from the working folder of a repository\n and write it to the Object Database as a loose blob

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Read a file from the working folder of a repository and write it\n to the object database.

\n", "comments": "", "group": "blob" }, - "git_blob_create_fromdisk": { + "git_blob_create_from_disk": { "type": "function", - "file": "blob.h", - "line": 151, - "lineto": 151, + "file": "git2/blob.h", + "line": 257, + "lineto": 260, "args": [ { "name": "id", @@ -2244,19 +2460,16 @@ ], "argline": "git_oid *id, git_repository *repo, const char *path", "sig": "git_oid *::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Read a file from the filesystem and write its content\n to the Object Database as a loose blob

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Read a file from the filesystem (not necessarily inside the\n working folder of the repository) and write it to the object\n database.

\n", "comments": "", "group": "blob" }, - "git_blob_create_fromstream": { + "git_blob_create_from_stream": { "type": "function", - "file": "blob.h", - "line": 178, - "lineto": 181, + "file": "git2/blob.h", + "line": 287, + "lineto": 290, "args": [ { "name": "out", @@ -2276,19 +2489,16 @@ ], "argline": "git_writestream **out, git_repository *repo, const char *hintpath", "sig": "git_writestream **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or error code" - }, - "description": "

Create a stream to write a new blob into the object db

\n", - "comments": "

This function may need to buffer the data on disk and will in general not be the right choice if you know the size of the data to write. If you have data in memory, use git_blob_create_frombuffer(). If you do not, but know the size of the contents (and don't want/need to perform filtering), use git_odb_open_wstream().

\n\n

Don't close this stream yourself but pass it to git_blob_create_fromstream_commit() to commit the write to the object db and get the object id.

\n\n

If the hintpath parameter is filled, it will be used to determine what git filters should be applied to the object before it is written to the object database.

\n", + "return": { "type": "int", "comment": " 0 or error code" }, + "description": "

Create a stream to write a new blob into the object database.

\n", + "comments": "

This function may need to buffer the data on disk and will in general not be the right choice if you know the size of the data to write. If you have data in memory, use git_blob_create_from_buffer(). If you do not, but know the size of the contents (and don't want/need to perform filtering), use git_odb_open_wstream().

\n\n

Don't close this stream yourself but pass it to git_blob_create_from_stream_commit() to commit the write to the object db and get the object id.

\n\n

If the hintpath parameter is filled, it will be used to determine what git filters should be applied to the object before it is written to the object database.

\n", "group": "blob" }, - "git_blob_create_fromstream_commit": { + "git_blob_create_from_stream_commit": { "type": "function", - "file": "blob.h", - "line": 192, - "lineto": 194, + "file": "git2/blob.h", + "line": 301, + "lineto": 303, "args": [ { "name": "out", @@ -2303,19 +2513,16 @@ ], "argline": "git_oid *out, git_writestream *stream", "sig": "git_oid *::git_writestream *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Close the stream and write the blob to the object db

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Close the stream and finalize writing the blob to the object database.

\n", "comments": "

The stream will be closed and freed.

\n", "group": "blob" }, - "git_blob_create_frombuffer": { + "git_blob_create_from_buffer": { "type": "function", - "file": "blob.h", - "line": 205, - "lineto": 206, + "file": "git2/blob.h", + "line": 314, + "lineto": 315, "args": [ { "name": "id", @@ -2325,34 +2532,27 @@ { "name": "repo", "type": "git_repository *", - "comment": "repository where to blob will be written" + "comment": "repository where the blob will be written" }, { "name": "buffer", "type": "const void *", "comment": "data to be written into the blob" }, - { - "name": "len", - "type": "size_t", - "comment": "length of the data" - } + { "name": "len", "type": "size_t", "comment": "length of the data" } ], "argline": "git_oid *id, git_repository *repo, const void *buffer, size_t len", "sig": "git_oid *::git_repository *::const void *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Write an in-memory buffer to the ODB as a blob

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Write an in-memory buffer to the object database as a blob.

\n", "comments": "", "group": "blob" }, "git_blob_is_binary": { "type": "function", - "file": "blob.h", - "line": 219, - "lineto": 219, + "file": "git2/blob.h", + "line": 328, + "lineto": 328, "args": [ { "name": "blob", @@ -2364,17 +2564,40 @@ "sig": "const git_blob *", "return": { "type": "int", - "comment": " 1 if the content of the blob is detected\n as binary; 0 otherwise." + "comment": " \n\n bool 1 if the content of the blob is detected\n as binary; 0 otherwise." }, - "description": "

Determine if the blob content is most certainly binary or not.

\n", + "description": "

Determine if the blob content is most likely binary or not.

\n", "comments": "

The heuristic used to guess if a file is binary is taken from core git: Searching for NUL bytes and looking for a reasonable ratio of printable to non-printable characters among the first 8000 bytes.

\n", "group": "blob" }, + "git_blob_data_is_binary": { + "type": "function", + "file": "git2/blob.h", + "line": 340, + "lineto": 340, + "args": [ + { + "name": "data", + "type": "const char *", + "comment": "The blob data which content should be analyzed" + }, + { "name": "len", "type": "size_t", "comment": "The length of the data" } + ], + "argline": "const char *data, size_t len", + "sig": "const char *::size_t", + "return": { + "type": "int", + "comment": " 1 if the content of the blob is detected\n as binary; 0 otherwise." + }, + "description": "

Determine if the given content is most certainly binary or not;\n this is the same mechanism used by git_blob_is_binary but only\n looking at raw data.

\n", + "comments": "", + "group": "blob" + }, "git_blob_dup": { "type": "function", - "file": "blob.h", - "line": 228, - "lineto": 228, + "file": "git2/blob.h", + "line": 350, + "lineto": 350, "args": [ { "name": "out", @@ -2389,19 +2612,16 @@ ], "argline": "git_blob **out, git_blob *source", "sig": "git_blob **::git_blob *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0." }, "description": "

Create an in-memory copy of a blob. The copy must be explicitly\n free'd or it will leak.

\n", "comments": "", "group": "blob" }, "git_branch_create": { "type": "function", - "file": "branch.h", - "line": 50, - "lineto": 55, + "file": "git2/branch.h", + "line": 53, + "lineto": 58, "args": [ { "name": "out", @@ -2411,17 +2631,17 @@ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "the repository to create the branch in." }, { "name": "branch_name", "type": "const char *", - "comment": "Name for the branch; this name is\n validated for consistency. It should also not conflict with\n an already existing branch name." + "comment": "Name for the branch; this name is\n validated for consistency. It should also not conflict with\n an already existing branch name." }, { "name": "target", "type": "const git_commit *", - "comment": "Commit to which this branch should point. This object\n must belong to the given `repo`." + "comment": "Commit to which this branch should point. This object\n must belong to the given `repo`." }, { "name": "force", @@ -2441,51 +2661,56 @@ }, "git_branch_create_from_annotated": { "type": "function", - "file": "branch.h", - "line": 68, - "lineto": 73, + "file": "git2/branch.h", + "line": 77, + "lineto": 82, "args": [ { "name": "ref_out", "type": "git_reference **", - "comment": null + "comment": "Pointer where to store the underlying reference." }, { - "name": "repository", + "name": "repo", "type": "git_repository *", - "comment": null + "comment": "the repository to create the branch in." }, { "name": "branch_name", "type": "const char *", - "comment": null + "comment": "Name for the branch; this name is\n validated for consistency. It should also not conflict with\n an already existing branch name." }, { - "name": "commit", + "name": "target", "type": "const git_annotated_commit *", - "comment": null + "comment": "Annotated commit to which this branch should point. This\n object must belong to the given `repo`." 
}, { "name": "force", "type": "int", - "comment": null + "comment": "Overwrite existing branch." } ], - "argline": "git_reference **ref_out, git_repository *repository, const char *branch_name, const git_annotated_commit *commit, int force", + "argline": "git_reference **ref_out, git_repository *repo, const char *branch_name, const git_annotated_commit *target, int force", "sig": "git_reference **::git_repository *::const char *::const git_annotated_commit *::int", "return": { "type": "int", - "comment": null + "comment": " 0, GIT_EINVALIDSPEC or an error code." }, "description": "

Create a new branch pointing at a target commit

\n", - "comments": "

This behaves like git_branch_create() but takes an annotated commit, which lets you specify which extended sha syntax string was specified by a user, allowing for more exact reflog messages.

\n\n

See the documentation for git_branch_create().

\n", - "group": "branch" + "comments": "

This behaves like git_branch_create() but takes an annotated commit, which lets you specify which extended sha syntax string was specified by a user, allowing for more exact reflog messages.

\n", + "group": "branch", + "examples": { + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_branch_create_from_annotated-7" + ] + } }, "git_branch_delete": { "type": "function", - "file": "branch.h", - "line": 85, - "lineto": 85, + "file": "git2/branch.h", + "line": 94, + "lineto": 94, "args": [ { "name": "branch", @@ -2500,14 +2725,14 @@ "comment": " 0 on success, or an error code." }, "description": "

Delete an existing branch reference.

\n", - "comments": "

If the branch is successfully deleted, the passed reference object will be invalidated. The reference must be freed manually by the user.

\n", + "comments": "

Note that if the deletion succeeds, the reference object will not be valid anymore, and should be freed immediately by the user using git_reference_free().

\n", "group": "branch" }, "git_branch_iterator_new": { "type": "function", - "file": "branch.h", - "line": 101, - "lineto": 104, + "file": "git2/branch.h", + "line": 110, + "lineto": 113, "args": [ { "name": "out", @@ -2527,19 +2752,16 @@ ], "argline": "git_branch_iterator **out, git_repository *repo, git_branch_t list_flags", "sig": "git_branch_iterator **::git_repository *::git_branch_t", - "return": { - "type": "int", - "comment": " 0 on success or an error code" - }, + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Create an iterator which loops over the requested branches.

\n", "comments": "", "group": "branch" }, "git_branch_next": { "type": "function", - "file": "branch.h", - "line": 114, - "lineto": 114, + "file": "git2/branch.h", + "line": 123, + "lineto": 123, "args": [ { "name": "out", @@ -2569,9 +2791,9 @@ }, "git_branch_iterator_free": { "type": "function", - "file": "branch.h", - "line": 121, - "lineto": 121, + "file": "git2/branch.h", + "line": 130, + "lineto": 130, "args": [ { "name": "iter", @@ -2581,24 +2803,21 @@ ], "argline": "git_branch_iterator *iter", "sig": "git_branch_iterator *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a branch iterator

\n", "comments": "", "group": "branch" }, "git_branch_move": { "type": "function", - "file": "branch.h", - "line": 138, - "lineto": 142, + "file": "git2/branch.h", + "line": 153, + "lineto": 157, "args": [ { "name": "out", "type": "git_reference **", - "comment": null + "comment": "New reference object for the updated name." }, { "name": "branch", @@ -2623,14 +2842,14 @@ "comment": " 0 on success, GIT_EINVALIDSPEC or an error code." }, "description": "

Move/rename an existing local branch reference.

\n", - "comments": "

The new branch name will be checked for validity. See git_tag_create() for rules about valid names.

\n", + "comments": "

The new branch name will be checked for validity. See git_tag_create() for rules about valid names.

\n\n

Note that if the move succeeds, the old reference object will not be valid anymore, and should be freed immediately by the user using git_reference_free().

\n", "group": "branch" }, "git_branch_lookup": { "type": "function", - "file": "branch.h", - "line": 165, - "lineto": 169, + "file": "git2/branch.h", + "line": 177, + "lineto": 181, "args": [ { "name": "out", @@ -2660,51 +2879,47 @@ "comment": " 0 on success; GIT_ENOTFOUND when no matching branch\n exists, GIT_EINVALIDSPEC, otherwise an error code." }, "description": "

Lookup a branch by its name in a repository.

\n", - "comments": "

The generated reference must be freed by the user.

\n\n

The branch name will be checked for validity. See git_tag_create() for rules about valid names.

\n", + "comments": "

The generated reference must be freed by the user. The branch name will be checked for validity.

\n", "group": "branch" }, "git_branch_name": { "type": "function", - "file": "branch.h", - "line": 186, - "lineto": 188, + "file": "git2/branch.h", + "line": 198, + "lineto": 200, "args": [ { "name": "out", "type": "const char **", - "comment": "where the pointer of branch name is stored;\n this is valid as long as the ref is not freed." + "comment": "Pointer to the abbreviated reference name.\n Owned by ref, do not free." }, { "name": "ref", "type": "const git_reference *", - "comment": "the reference ideally pointing to a branch" + "comment": "A reference object, ideally pointing to a branch" } ], "argline": "const char **out, const git_reference *ref", "sig": "const char **::const git_reference *", "return": { "type": "int", - "comment": " 0 on success; otherwise an error code (e.g., if the\n ref is no local or remote branch)." + "comment": " 0 on success; GIT_EINVALID if the reference isn't either a local or\n remote branch, otherwise an error code." }, - "description": "

Return the name of the given local or remote branch.

\n", - "comments": "

The name of the branch matches the definition of the name for git_branch_lookup. That is, if the returned name is given to git_branch_lookup() then the reference is returned that was given to this function.

\n", + "description": "

Get the branch name

\n", + "comments": "

Given a reference object, this will check that it really is a branch (ie. it lives under "refs/heads/" or "refs/remotes/"), and return the branch part of it.

\n", "group": "branch", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_branch_name-6" - ] - } + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_branch_name-4"] } }, "git_branch_upstream": { "type": "function", - "file": "branch.h", - "line": 202, - "lineto": 204, + "file": "git2/branch.h", + "line": 216, + "lineto": 218, "args": [ { "name": "out", "type": "git_reference **", - "comment": "Pointer where to store the retrieved\n reference." + "comment": "Pointer where to store the retrieved reference." }, { "name": "branch", @@ -2716,17 +2931,17 @@ "sig": "git_reference **::const git_reference *", "return": { "type": "int", - "comment": " 0 on success; GIT_ENOTFOUND when no remote tracking\n reference exists, otherwise an error code." + "comment": " 0 on success; GIT_ENOTFOUND when no remote tracking\n reference exists, otherwise an error code." }, - "description": "

Return the reference supporting the remote tracking branch,\n given a local branch reference.

\n", - "comments": "", + "description": "

Get the upstream of a branch

\n", + "comments": "

Given a reference, this will return a new reference object corresponding to its remote tracking branch. The reference must be a local branch.

\n", "group": "branch" }, "git_branch_set_upstream": { "type": "function", - "file": "branch.h", - "line": 216, - "lineto": 216, + "file": "git2/branch.h", + "line": 235, + "lineto": 237, "args": [ { "name": "branch", @@ -2734,216 +2949,252 @@ "comment": "the branch to configure" }, { - "name": "upstream_name", + "name": "branch_name", "type": "const char *", - "comment": "remote-tracking or local branch to set as\n upstream. Pass NULL to unset." + "comment": "remote-tracking or local branch to set as upstream." } ], - "argline": "git_reference *branch, const char *upstream_name", + "argline": "git_reference *branch, const char *branch_name", "sig": "git_reference *::const char *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " \n\n git_error_t 0 on success; GIT_ENOTFOUND if there's no branch named `branch_name`\n or an error code" }, - "description": "

Set the upstream configuration for a given local branch

\n", - "comments": "", + "description": "

Set a branch's upstream branch

\n", + "comments": "

This will update the configuration to set the branch named branch_name as the upstream of branch. Pass a NULL name to unset the upstream information.

\n", + "group": "branch" + }, + "git_branch_upstream_name": { + "type": "function", + "file": "git2/branch.h", + "line": 253, + "lineto": 256, + "args": [ + { + "name": "out", + "type": "git_buf *", + "comment": "the buffer into which the name will be written." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the branches live." + }, + { + "name": "refname", + "type": "const char *", + "comment": "reference name of the local branch." + } + ], + "argline": "git_buf *out, git_repository *repo, const char *refname", + "sig": "git_buf *::git_repository *::const char *", + "return": { + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND when no remote tracking reference exists,\n or an error code." + }, + "description": "

Get the upstream name of a branch

\n", + "comments": "

Given a local branch, this will return its remote-tracking branch information, as a full reference name, ie. "feature/nice" would become "refs/remote/origin/feature/nice", depending on that branch's configuration.

\n", "group": "branch" }, "git_branch_is_head": { "type": "function", - "file": "branch.h", - "line": 245, - "lineto": 246, + "file": "git2/branch.h", + "line": 266, + "lineto": 267, "args": [ { "name": "branch", "type": "const git_reference *", - "comment": "Current underlying reference of the branch." + "comment": "A reference to a local branch." } ], "argline": "const git_reference *branch", "sig": "const git_reference *", "return": { "type": "int", - "comment": " 1 if HEAD points at the branch, 0 if it isn't,\n error code otherwise." + "comment": " 1 if HEAD points at the branch, 0 if it isn't, or a negative value\n \t\t as an error code." }, - "description": "

Determine if the current local branch is pointed at by HEAD.

\n", + "description": "

Determine if HEAD points to the given branch

\n", "comments": "", "group": "branch" }, "git_branch_is_checked_out": { "type": "function", - "file": "branch.h", - "line": 257, - "lineto": 258, + "file": "git2/branch.h", + "line": 279, + "lineto": 280, "args": [ { "name": "branch", "type": "const git_reference *", - "comment": "Reference to the branch." + "comment": "A reference to a local branch." } ], "argline": "const git_reference *branch", "sig": "const git_reference *", "return": { "type": "int", - "comment": " 1 if branch is checked out, 0 if it isn't,\n error code otherwise." + "comment": " 1 if branch is checked out, 0 if it isn't, an error code otherwise." }, - "description": "

Determine if the current branch is checked out in any linked\n repository.

\n", - "comments": "", + "description": "

Determine if any HEAD points to the current branch

\n", + "comments": "

This will iterate over all known linked repositories (usually in the form of worktrees) and report whether any HEAD is pointing at the current branch.

\n", "group": "branch" }, - "git_buf_free": { + "git_branch_remote_name": { "type": "function", - "file": "buffer.h", - "line": 72, - "lineto": 72, + "file": "git2/branch.h", + "line": 298, + "lineto": 301, "args": [ { - "name": "buffer", + "name": "out", "type": "git_buf *", - "comment": "The buffer to deallocate" + "comment": "The buffer into which the name will be written." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository where the branch lives." + }, + { + "name": "refname", + "type": "const char *", + "comment": "complete name of the remote tracking branch." } ], - "argline": "git_buf *buffer", - "sig": "git_buf *", + "argline": "git_buf *out, git_repository *repo, const char *refname", + "sig": "git_buf *::git_repository *::const char *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND when no matching remote was found,\n GIT_EAMBIGUOUS when the branch maps to several remotes,\n otherwise an error code." }, - "description": "

Free the memory referred to by the git_buf.

\n", - "comments": "

Note that this does not free the git_buf itself, just the memory pointed to by buffer->ptr. This will not free the memory if it looks like it was not allocated internally, but it will clear the buffer back to the empty state.

\n", - "group": "buf", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_buf_free-1" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_buf_free-1" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_buf_free-1" - ] - } + "description": "

Find the remote name of a remote-tracking branch

\n", + "comments": "

This will return the name of the remote whose fetch refspec is matching the given branch. E.g. given a branch "refs/remotes/test/master", it will extract the "test" part. If refspecs from multiple remotes match, the function will return GIT_EAMBIGUOUS.

\n", + "group": "branch" }, - "git_buf_grow": { + "git_branch_upstream_remote": { "type": "function", - "file": "buffer.h", - "line": 95, - "lineto": 95, + "file": "git2/branch.h", + "line": 314, + "lineto": 314, "args": [ { - "name": "buffer", + "name": "buf", "type": "git_buf *", - "comment": "The buffer to be resized; may or may not be allocated yet" + "comment": "the buffer into which to write the name" }, { - "name": "target_size", - "type": "size_t", - "comment": "The desired available size" + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to look" + }, + { + "name": "refname", + "type": "const char *", + "comment": "the full name of the branch" } ], - "argline": "git_buf *buffer, size_t target_size", - "sig": "git_buf *::size_t", - "return": { - "type": "int", - "comment": " 0 on success, -1 on allocation failure" - }, - "description": "

Resize the buffer allocation to make more space.

\n", - "comments": "

This will attempt to grow the buffer to accommodate the target size.

\n\n

If the buffer refers to memory that was not allocated by libgit2 (i.e. the asize field is zero), then ptr will be replaced with a newly allocated block of data. Be careful so that memory allocated by the caller is not lost. As a special variant, if you pass target_size as 0 and the memory is not allocated by libgit2, this will allocate a new buffer of size size and copy the external data into it.

\n\n

Currently, this will never shrink a buffer, only expand it.

\n\n

If the allocation fails, this will return an error and the buffer will be marked as invalid for future operations, invaliding the contents.

\n", - "group": "buf" + "argline": "git_buf *buf, git_repository *repo, const char *refname", + "sig": "git_buf *::git_repository *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Retrieve the upstream remote of a local branch

\n", + "comments": "

This will return the currently configured "branch.*.remote" for a given branch. This branch must be local.

\n", + "group": "branch" }, - "git_buf_set": { + "git_branch_upstream_merge": { "type": "function", - "file": "buffer.h", - "line": 105, - "lineto": 106, + "file": "git2/branch.h", + "line": 327, + "lineto": 327, "args": [ { - "name": "buffer", + "name": "buf", "type": "git_buf *", - "comment": "The buffer to set" + "comment": "the buffer into which to write the name" }, { - "name": "data", - "type": "const void *", - "comment": "The data to copy into the buffer" + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to look" }, { - "name": "datalen", - "type": "size_t", - "comment": "The length of the data to copy into the buffer" + "name": "refname", + "type": "const char *", + "comment": "the full name of the branch" } ], - "argline": "git_buf *buffer, const void *data, size_t datalen", - "sig": "git_buf *::const void *::size_t", - "return": { - "type": "int", - "comment": " 0 on success, -1 on allocation failure" - }, - "description": "

Set buffer to a copy of some raw data.

\n", - "comments": "", - "group": "buf" + "argline": "git_buf *buf, git_repository *repo, const char *refname", + "sig": "git_buf *::git_repository *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Retrieve the upstream merge of a local branch

\n", + "comments": "

This will return the currently configured "branch.*.merge" for a given branch. This branch must be local.

\n", + "group": "branch" }, - "git_buf_is_binary": { + "git_branch_name_is_valid": { "type": "function", - "file": "buffer.h", - "line": 114, - "lineto": 114, + "file": "git2/branch.h", + "line": 339, + "lineto": 339, "args": [ { - "name": "buf", - "type": "const git_buf *", - "comment": "Buffer to check" + "name": "valid", + "type": "int *", + "comment": "output pointer to set with validity of given branch name" + }, + { + "name": "name", + "type": "const char *", + "comment": "a branch name to test" } ], - "argline": "const git_buf *buf", - "sig": "const git_buf *", - "return": { - "type": "int", - "comment": " 1 if buffer looks like non-text data" - }, - "description": "

Check quickly if buffer looks like it contains binary data

\n", + "argline": "int *valid, const char *name", + "sig": "int *::const char *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Determine whether a branch name is valid, meaning that (when prefixed\n with refs/heads/) that it is a valid reference name, and that any\n additional branch name restrictions are imposed (eg, it cannot start\n with a -).

\n", "comments": "", - "group": "buf" + "group": "branch" }, - "git_buf_contains_nul": { + "git_buf_dispose": { "type": "function", - "file": "buffer.h", - "line": 122, - "lineto": 122, + "file": "git2/buffer.h", + "line": 71, + "lineto": 71, "args": [ { - "name": "buf", - "type": "const git_buf *", - "comment": "Buffer to check" + "name": "buffer", + "type": "git_buf *", + "comment": "The buffer to deallocate" } ], - "argline": "const git_buf *buf", - "sig": "const git_buf *", - "return": { - "type": "int", - "comment": " 1 if buffer contains a NUL byte" - }, - "description": "

Check quickly if buffer contains a NUL byte

\n", - "comments": "", - "group": "buf" + "argline": "git_buf *buffer", + "sig": "git_buf *", + "return": { "type": "void", "comment": null }, + "description": "

Free the memory referred to by the git_buf.

\n", + "comments": "

Note that this does not free the git_buf itself, just the memory pointed to by buffer->ptr.

\n", + "group": "buf", + "examples": { + "diff.c": [ + "ex/v1.9.1/diff.html#git_buf_dispose-1", + "ex/v1.9.1/diff.html#git_buf_dispose-2" + ], + "tag.c": ["ex/v1.9.1/tag.html#git_buf_dispose-1"] + } }, - "git_checkout_init_options": { + "git_checkout_options_init": { "type": "function", - "file": "checkout.h", - "line": 308, - "lineto": 310, + "file": "git2/checkout.h", + "line": 410, + "lineto": 412, "args": [ { "name": "opts", "type": "git_checkout_options *", - "comment": "the `git_checkout_options` struct to initialize." + "comment": "The `git_checkout_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_CHECKOUT_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_CHECKOUT_OPTIONS_VERSION`." } ], "argline": "git_checkout_options *opts, unsigned int version", @@ -2952,15 +3203,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_checkout_options with default values. Equivalent to\n creating an instance with GIT_CHECKOUT_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_checkout_options structure

\n", + "comments": "

Initializes a git_checkout_options with default values. Equivalent to creating an instance with GIT_CHECKOUT_OPTIONS_INIT.

\n", "group": "checkout" }, "git_checkout_head": { "type": "function", - "file": "checkout.h", - "line": 329, - "lineto": 331, + "file": "git2/checkout.h", + "line": 431, + "lineto": 433, "args": [ { "name": "repo", @@ -2977,7 +3228,7 @@ "sig": "git_repository *::const git_checkout_options *", "return": { "type": "int", - "comment": " 0 on success, GIT_EUNBORNBRANCH if HEAD points to a non\n existing branch, non-zero value returned by `notify_cb`, or\n other error code \n<\n 0 (use giterr_last for error details)" + "comment": " 0 on success, GIT_EUNBORNBRANCH if HEAD points to a non\n existing branch, non-zero value returned by `notify_cb`, or\n other error code \n<\n 0 (use git_error_last for error details)" }, "description": "

Updates files in the index and the working tree to match the content of\n the commit pointed at by HEAD.

\n", "comments": "

Note that this is not the correct mechanism used to switch branches; do not change your HEAD and then call this method, that would leave you with checkout conflicts since your working directory would then appear to be dirty. Instead, checkout the target of the branch and then update HEAD using git_repository_set_head to point to the branch you checked out.

\n", @@ -2985,9 +3236,9 @@ }, "git_checkout_index": { "type": "function", - "file": "checkout.h", - "line": 342, - "lineto": 345, + "file": "git2/checkout.h", + "line": 444, + "lineto": 447, "args": [ { "name": "repo", @@ -3009,7 +3260,7 @@ "sig": "git_repository *::git_index *::const git_checkout_options *", "return": { "type": "int", - "comment": " 0 on success, non-zero return value from `notify_cb`, or error\n code \n<\n 0 (use giterr_last for error details)" + "comment": " 0 on success, non-zero return value from `notify_cb`, or error\n code \n<\n 0 (use git_error_last for error details)" }, "description": "

Updates files in the working tree to match the content of the index.

\n", "comments": "", @@ -3017,9 +3268,9 @@ }, "git_checkout_tree": { "type": "function", - "file": "checkout.h", - "line": 358, - "lineto": 361, + "file": "git2/checkout.h", + "line": 460, + "lineto": 463, "args": [ { "name": "repo", @@ -3041,32 +3292,31 @@ "sig": "git_repository *::const git_object *::const git_checkout_options *", "return": { "type": "int", - "comment": " 0 on success, non-zero return value from `notify_cb`, or error\n code \n<\n 0 (use giterr_last for error details)" + "comment": " 0 on success, non-zero return value from `notify_cb`, or error\n code \n<\n 0 (use git_error_last for error details)" }, "description": "

Updates files in the index and working tree to match the content of the\n tree pointed at by the treeish.

\n", "comments": "", "group": "checkout", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_checkout_tree-7" - ] + "checkout.c": ["ex/v1.9.1/checkout.html#git_checkout_tree-8"], + "merge.c": ["ex/v1.9.1/merge.html#git_checkout_tree-5"] } }, - "git_cherrypick_init_options": { + "git_cherrypick_options_init": { "type": "function", - "file": "cherrypick.h", - "line": 47, - "lineto": 49, + "file": "git2/cherrypick.h", + "line": 57, + "lineto": 59, "args": [ { "name": "opts", "type": "git_cherrypick_options *", - "comment": "the `git_cherrypick_options` struct to initialize" + "comment": "The `git_cherrypick_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_CHERRYPICK_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_CHERRYPICK_OPTIONS_VERSION`." } ], "argline": "git_cherrypick_options *opts, unsigned int version", @@ -3075,15 +3325,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_cherrypick_options with default values. Equivalent to\n creating an instance with GIT_CHERRYPICK_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_cherrypick_options structure

\n", + "comments": "

Initializes a git_cherrypick_options with default values. Equivalent to creating an instance with GIT_CHERRYPICK_OPTIONS_INIT.

\n", "group": "cherrypick" }, "git_cherrypick_commit": { "type": "function", - "file": "cherrypick.h", - "line": 65, - "lineto": 71, + "file": "git2/cherrypick.h", + "line": 75, + "lineto": 81, "args": [ { "name": "out", @@ -3103,12 +3353,12 @@ { "name": "our_commit", "type": "git_commit *", - "comment": "the commit to revert against (eg, HEAD)" + "comment": "the commit to cherry-pick against (eg, HEAD)" }, { "name": "mainline", "type": "unsigned int", - "comment": "the parent of the revert commit, if it is a merge" + "comment": "the parent of the `cherrypick_commit`, if it is a merge" }, { "name": "merge_options", @@ -3128,9 +3378,9 @@ }, "git_cherrypick": { "type": "function", - "file": "cherrypick.h", - "line": 81, - "lineto": 84, + "file": "git2/cherrypick.h", + "line": 91, + "lineto": 94, "args": [ { "name": "repo", @@ -3158,21 +3408,21 @@ "comments": "", "group": "cherrypick" }, - "git_clone_init_options": { + "git_clone_options_init": { "type": "function", - "file": "clone.h", - "line": 179, - "lineto": 181, + "file": "git2/clone.h", + "line": 192, + "lineto": 194, "args": [ { "name": "opts", "type": "git_clone_options *", - "comment": "The `git_clone_options` struct to initialize" + "comment": "The `git_clone_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_CLONE_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_CLONE_OPTIONS_VERSION`." } ], "argline": "git_clone_options *opts, unsigned int version", @@ -3181,15 +3431,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_clone_options with default values. Equivalent to\n creating an instance with GIT_CLONE_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_clone_options structure

\n", + "comments": "

Initializes a git_clone_options with default values. Equivalent to creating an instance with GIT_CLONE_OPTIONS_INIT.

\n", "group": "clone" }, "git_clone": { "type": "function", - "file": "clone.h", - "line": 199, - "lineto": 203, + "file": "git2/clone.h", + "line": 216, + "lineto": 220, "args": [ { "name": "out", @@ -3216,22 +3466,17 @@ "sig": "git_repository **::const char *::const char *::const git_clone_options *", "return": { "type": "int", - "comment": " 0 on success, any non-zero return value from a callback\n function, or a negative value to indicate an error (use\n `giterr_last` for a detailed error message)" + "comment": " 0 on success, any non-zero return value from a callback\n function, or a negative value to indicate an error (use\n `git_error_last` for a detailed error message)" }, "description": "

Clone a remote repository.

\n", - "comments": "

By default this creates its repository and initial remote to match git's defaults. You can use the options in the callback to customize how these are created.

\n", - "group": "clone", - "examples": { - "network/clone.c": [ - "ex/HEAD/network/clone.html#git_clone-1" - ] - } + "comments": "

By default this creates its repository and initial remote to match git's defaults. You can use the options in the callback to customize how these are created.

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", + "group": "clone" }, "git_commit_lookup": { "type": "function", - "file": "commit.h", - "line": 36, - "lineto": 37, + "file": "git2/commit.h", + "line": 40, + "lineto": 41, "args": [ { "name": "commit", @@ -3251,32 +3496,26 @@ ], "argline": "git_commit **commit, git_repository *repo, const git_oid *id", "sig": "git_commit **::git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a commit object from a repository.

\n", "comments": "

The returned object should be released with git_commit_free when no longer needed.

\n", "group": "commit", "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_commit_lookup-9"], "general.c": [ - "ex/HEAD/general.html#git_commit_lookup-6", - "ex/HEAD/general.html#git_commit_lookup-7", - "ex/HEAD/general.html#git_commit_lookup-8" + "ex/v1.9.1/general.html#git_commit_lookup-6", + "ex/v1.9.1/general.html#git_commit_lookup-7", + "ex/v1.9.1/general.html#git_commit_lookup-8" ], - "log.c": [ - "ex/HEAD/log.html#git_commit_lookup-1" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_commit_lookup-8" - ] + "log.c": ["ex/v1.9.1/log.html#git_commit_lookup-1"], + "merge.c": ["ex/v1.9.1/merge.html#git_commit_lookup-6"] } }, "git_commit_lookup_prefix": { "type": "function", - "file": "commit.h", - "line": 55, - "lineto": 56, + "file": "git2/commit.h", + "line": 59, + "lineto": 60, "args": [ { "name": "commit", @@ -3301,19 +3540,16 @@ ], "argline": "git_commit **commit, git_repository *repo, const git_oid *id, size_t len", "sig": "git_commit **::git_repository *::const git_oid *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a commit object from a repository, given a prefix of its\n identifier (short id).

\n", "comments": "

The returned object should be released with git_commit_free when no longer needed.

\n", "group": "commit" }, "git_commit_free": { "type": "function", - "file": "commit.h", - "line": 70, - "lineto": 70, + "file": "git2/commit.h", + "line": 74, + "lineto": 74, "args": [ { "name": "commit", @@ -3323,34 +3559,32 @@ ], "argline": "git_commit *commit", "sig": "git_commit *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open commit

\n", "comments": "

This is a wrapper around git_object_free()

\n\n

IMPORTANT: It is necessary to call this method when you stop using a commit. Failure to do so will cause a memory leak.

\n", "group": "commit", "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_commit_free-10"], "general.c": [ - "ex/HEAD/general.html#git_commit_free-9", - "ex/HEAD/general.html#git_commit_free-10", - "ex/HEAD/general.html#git_commit_free-11", - "ex/HEAD/general.html#git_commit_free-12", - "ex/HEAD/general.html#git_commit_free-13" + "ex/v1.9.1/general.html#git_commit_free-9", + "ex/v1.9.1/general.html#git_commit_free-10", + "ex/v1.9.1/general.html#git_commit_free-11", + "ex/v1.9.1/general.html#git_commit_free-12", + "ex/v1.9.1/general.html#git_commit_free-13" ], "log.c": [ - "ex/HEAD/log.html#git_commit_free-2", - "ex/HEAD/log.html#git_commit_free-3", - "ex/HEAD/log.html#git_commit_free-4", - "ex/HEAD/log.html#git_commit_free-5" + "ex/v1.9.1/log.html#git_commit_free-2", + "ex/v1.9.1/log.html#git_commit_free-3", + "ex/v1.9.1/log.html#git_commit_free-4", + "ex/v1.9.1/log.html#git_commit_free-5" ] } }, "git_commit_id": { "type": "function", - "file": "commit.h", - "line": 78, - "lineto": 78, + "file": "git2/commit.h", + "line": 82, + "lineto": 82, "args": [ { "name": "commit", @@ -3368,19 +3602,15 @@ "comments": "", "group": "commit", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_commit_id-14" - ], - "log.c": [ - "ex/HEAD/log.html#git_commit_id-6" - ] + "general.c": ["ex/v1.9.1/general.html#git_commit_id-14"], + "log.c": ["ex/v1.9.1/log.html#git_commit_id-6"] } }, "git_commit_owner": { "type": "function", - "file": "commit.h", - "line": 86, - "lineto": 86, + "file": "git2/commit.h", + "line": 90, + "lineto": 90, "args": [ { "name": "commit", @@ -3399,16 +3629,16 @@ "group": "commit", "examples": { "log.c": [ - "ex/HEAD/log.html#git_commit_owner-7", - "ex/HEAD/log.html#git_commit_owner-8" + "ex/v1.9.1/log.html#git_commit_owner-7", + "ex/v1.9.1/log.html#git_commit_owner-8" ] } }, "git_commit_message_encoding": { "type": "function", - "file": "commit.h", - "line": 98, - "lineto": 98, + "file": "git2/commit.h", + "line": 102, + "lineto": 102, "args": [ 
{ "name": "commit", @@ -3418,19 +3648,16 @@ ], "argline": "const git_commit *commit", "sig": "const git_commit *", - "return": { - "type": "const char *", - "comment": " NULL, or the encoding" - }, + "return": { "type": "const char *", "comment": " NULL, or the encoding" }, "description": "

Get the encoding for the message of a commit,\n as a string representing a standard encoding name.

\n", "comments": "

The encoding may be NULL if the encoding header in the commit is missing; in that case UTF-8 is assumed.

\n", "group": "commit" }, "git_commit_message": { "type": "function", - "file": "commit.h", - "line": 109, - "lineto": 109, + "file": "git2/commit.h", + "line": 113, + "lineto": 113, "args": [ { "name": "commit", @@ -3449,29 +3676,27 @@ "group": "commit", "examples": { "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_message-3", - "ex/HEAD/cat-file.html#git_commit_message-4" + "ex/v1.9.1/cat-file.html#git_commit_message-3", + "ex/v1.9.1/cat-file.html#git_commit_message-4" ], "general.c": [ - "ex/HEAD/general.html#git_commit_message-15", - "ex/HEAD/general.html#git_commit_message-16", - "ex/HEAD/general.html#git_commit_message-17" + "ex/v1.9.1/general.html#git_commit_message-15", + "ex/v1.9.1/general.html#git_commit_message-16", + "ex/v1.9.1/general.html#git_commit_message-17" ], "log.c": [ - "ex/HEAD/log.html#git_commit_message-9", - "ex/HEAD/log.html#git_commit_message-10", - "ex/HEAD/log.html#git_commit_message-11" + "ex/v1.9.1/log.html#git_commit_message-9", + "ex/v1.9.1/log.html#git_commit_message-10", + "ex/v1.9.1/log.html#git_commit_message-11" ], - "tag.c": [ - "ex/HEAD/tag.html#git_commit_message-2" - ] + "tag.c": ["ex/v1.9.1/tag.html#git_commit_message-2"] } }, "git_commit_message_raw": { "type": "function", - "file": "commit.h", - "line": 117, - "lineto": 117, + "file": "git2/commit.h", + "line": 121, + "lineto": 121, "args": [ { "name": "commit", @@ -3491,9 +3716,9 @@ }, "git_commit_summary": { "type": "function", - "file": "commit.h", - "line": 128, - "lineto": 128, + "file": "git2/commit.h", + "line": 132, + "lineto": 132, "args": [ { "name": "commit", @@ -3513,9 +3738,9 @@ }, "git_commit_body": { "type": "function", - "file": "commit.h", - "line": 141, - "lineto": 141, + "file": "git2/commit.h", + "line": 145, + "lineto": 145, "args": [ { "name": "commit", @@ -3535,9 +3760,9 @@ }, "git_commit_time": { "type": "function", - "file": "commit.h", - "line": 149, - "lineto": 149, + "file": "git2/commit.h", + "line": 153, + "lineto": 153, "args": [ { 
"name": "commit", @@ -3547,25 +3772,22 @@ ], "argline": "const git_commit *commit", "sig": "const git_commit *", - "return": { - "type": "git_time_t", - "comment": " the time of a commit" - }, + "return": { "type": "git_time_t", "comment": " the time of a commit" }, "description": "

Get the commit time (i.e. committer time) of a commit.

\n", "comments": "", "group": "commit", "examples": { "general.c": [ - "ex/HEAD/general.html#git_commit_time-18", - "ex/HEAD/general.html#git_commit_time-19" + "ex/v1.9.1/general.html#git_commit_time-18", + "ex/v1.9.1/general.html#git_commit_time-19" ] } }, "git_commit_time_offset": { "type": "function", - "file": "commit.h", - "line": 157, - "lineto": 157, + "file": "git2/commit.h", + "line": 161, + "lineto": 161, "args": [ { "name": "commit", @@ -3585,9 +3807,9 @@ }, "git_commit_committer": { "type": "function", - "file": "commit.h", - "line": 165, - "lineto": 165, + "file": "git2/commit.h", + "line": 169, + "lineto": 169, "args": [ { "name": "commit", @@ -3605,22 +3827,16 @@ "comments": "", "group": "commit", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_committer-5" - ], - "general.c": [ - "ex/HEAD/general.html#git_commit_committer-20" - ], - "log.c": [ - "ex/HEAD/log.html#git_commit_committer-12" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_commit_committer-5"], + "general.c": ["ex/v1.9.1/general.html#git_commit_committer-20"], + "log.c": ["ex/v1.9.1/log.html#git_commit_committer-12"] } }, "git_commit_author": { "type": "function", - "file": "commit.h", - "line": 173, - "lineto": 173, + "file": "git2/commit.h", + "line": 177, + "lineto": 177, "args": [ { "name": "commit", @@ -3638,24 +3854,80 @@ "comments": "", "group": "commit", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_author-6" - ], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_commit_author-6"], "general.c": [ - "ex/HEAD/general.html#git_commit_author-21", - "ex/HEAD/general.html#git_commit_author-22" + "ex/v1.9.1/general.html#git_commit_author-21", + "ex/v1.9.1/general.html#git_commit_author-22" ], "log.c": [ - "ex/HEAD/log.html#git_commit_author-13", - "ex/HEAD/log.html#git_commit_author-14" + "ex/v1.9.1/log.html#git_commit_author-13", + "ex/v1.9.1/log.html#git_commit_author-14" ] } }, + "git_commit_committer_with_mailmap": { + "type": "function", + 
"file": "git2/commit.h", + "line": 190, + "lineto": 191, + "args": [ + { + "name": "out", + "type": "git_signature **", + "comment": "a pointer to store the resolved signature." + }, + { + "name": "commit", + "type": "const git_commit *", + "comment": "a previously loaded commit." + }, + { + "name": "mailmap", + "type": "const git_mailmap *", + "comment": "the mailmap to resolve with. (may be NULL)" + } + ], + "argline": "git_signature **out, const git_commit *commit, const git_mailmap *mailmap", + "sig": "git_signature **::const git_commit *::const git_mailmap *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Get the committer of a commit, using the mailmap to map names and email\n addresses to canonical real names and email addresses.

\n", + "comments": "

Call git_signature_free to free the signature.

\n", + "group": "commit" + }, + "git_commit_author_with_mailmap": { + "type": "function", + "file": "git2/commit.h", + "line": 204, + "lineto": 205, + "args": [ + { + "name": "out", + "type": "git_signature **", + "comment": "a pointer to store the resolved signature." + }, + { + "name": "commit", + "type": "const git_commit *", + "comment": "a previously loaded commit." + }, + { + "name": "mailmap", + "type": "const git_mailmap *", + "comment": "the mailmap to resolve with. (may be NULL)" + } + ], + "argline": "git_signature **out, const git_commit *commit, const git_mailmap *mailmap", + "sig": "git_signature **::const git_commit *::const git_mailmap *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Get the author of a commit, using the mailmap to map names and email\n addresses to canonical real names and email addresses.

\n", + "comments": "

Call git_signature_free to free the signature.

\n", + "group": "commit" + }, "git_commit_raw_header": { "type": "function", - "file": "commit.h", - "line": 181, - "lineto": 181, + "file": "git2/commit.h", + "line": 213, + "lineto": 213, "args": [ { "name": "commit", @@ -3675,9 +3947,9 @@ }, "git_commit_tree": { "type": "function", - "file": "commit.h", - "line": 190, - "lineto": 190, + "file": "git2/commit.h", + "line": 222, + "lineto": 222, "args": [ { "name": "tree_out", @@ -3692,28 +3964,25 @@ ], "argline": "git_tree **tree_out, const git_commit *commit", "sig": "git_tree **::const git_commit *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the tree pointed to by a commit.

\n", "comments": "", "group": "commit", "examples": { "log.c": [ - "ex/HEAD/log.html#git_commit_tree-15", - "ex/HEAD/log.html#git_commit_tree-16", - "ex/HEAD/log.html#git_commit_tree-17", - "ex/HEAD/log.html#git_commit_tree-18", - "ex/HEAD/log.html#git_commit_tree-19" + "ex/v1.9.1/log.html#git_commit_tree-15", + "ex/v1.9.1/log.html#git_commit_tree-16", + "ex/v1.9.1/log.html#git_commit_tree-17", + "ex/v1.9.1/log.html#git_commit_tree-18", + "ex/v1.9.1/log.html#git_commit_tree-19" ] } }, "git_commit_tree_id": { "type": "function", - "file": "commit.h", - "line": 200, - "lineto": 200, + "file": "git2/commit.h", + "line": 232, + "lineto": 232, "args": [ { "name": "commit", @@ -3731,16 +4000,14 @@ "comments": "", "group": "commit", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_tree_id-7" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_commit_tree_id-7"] } }, "git_commit_parentcount": { "type": "function", - "file": "commit.h", - "line": 208, - "lineto": 208, + "file": "git2/commit.h", + "line": 240, + "lineto": 240, "args": [ { "name": "commit", @@ -3758,23 +4025,19 @@ "comments": "", "group": "commit", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_parentcount-8" - ], - "general.c": [ - "ex/HEAD/general.html#git_commit_parentcount-23" - ], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_commit_parentcount-8"], + "general.c": ["ex/v1.9.1/general.html#git_commit_parentcount-23"], "log.c": [ - "ex/HEAD/log.html#git_commit_parentcount-20", - "ex/HEAD/log.html#git_commit_parentcount-21" + "ex/v1.9.1/log.html#git_commit_parentcount-20", + "ex/v1.9.1/log.html#git_commit_parentcount-21" ] } }, "git_commit_parent": { "type": "function", - "file": "commit.h", - "line": 218, - "lineto": 221, + "file": "git2/commit.h", + "line": 250, + "lineto": 253, "args": [ { "name": "out", @@ -3794,28 +4057,23 @@ ], "argline": "git_commit **out, const git_commit *commit, unsigned int n", "sig": "git_commit **::const git_commit *::unsigned int", - 
"return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the specified parent of the commit.

\n", "comments": "", "group": "commit", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_commit_parent-24" - ], + "general.c": ["ex/v1.9.1/general.html#git_commit_parent-24"], "log.c": [ - "ex/HEAD/log.html#git_commit_parent-22", - "ex/HEAD/log.html#git_commit_parent-23" + "ex/v1.9.1/log.html#git_commit_parent-22", + "ex/v1.9.1/log.html#git_commit_parent-23" ] } }, "git_commit_parent_id": { "type": "function", - "file": "commit.h", - "line": 232, - "lineto": 234, + "file": "git2/commit.h", + "line": 264, + "lineto": 266, "args": [ { "name": "commit", @@ -3838,19 +4096,15 @@ "comments": "", "group": "commit", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_commit_parent_id-9" - ], - "log.c": [ - "ex/HEAD/log.html#git_commit_parent_id-24" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_commit_parent_id-9"], + "log.c": ["ex/v1.9.1/log.html#git_commit_parent_id-24"] } }, "git_commit_nth_gen_ancestor": { "type": "function", - "file": "commit.h", - "line": 250, - "lineto": 253, + "file": "git2/commit.h", + "line": 282, + "lineto": 285, "args": [ { "name": "ancestor", @@ -3880,9 +4134,9 @@ }, "git_commit_header_field": { "type": "function", - "file": "commit.h", - "line": 265, - "lineto": 265, + "file": "git2/commit.h", + "line": 297, + "lineto": 297, "args": [ { "name": "out", @@ -3912,9 +4166,9 @@ }, "git_commit_extract_signature": { "type": "function", - "file": "commit.h", - "line": 285, - "lineto": 285, + "file": "git2/commit.h", + "line": 317, + "lineto": 317, "args": [ { "name": "signature", @@ -3949,14 +4203,14 @@ "comment": " 0 on success, GIT_ENOTFOUND if the id is not for a commit\n or the commit does not have a signature." }, "description": "

Extract the signature from a commit

\n", - "comments": "

If the id is not for a commit, the error class will be GITERR_INVALID. If the commit does not have a signature, the error class will be GITERR_OBJECT.

\n", + "comments": "

If the id is not for a commit, the error class will be GIT_ERROR_INVALID. If the commit does not have a signature, the error class will be GIT_ERROR_OBJECT.

\n", "group": "commit" }, "git_commit_create": { "type": "function", - "file": "commit.h", - "line": 331, - "lineto": 341, + "file": "git2/commit.h", + "line": 363, + "lineto": 373, "args": [ { "name": "id", @@ -4018,134 +4272,164 @@ "description": "

Create new commit in the repository from a list of git_object pointers

\n", "comments": "

The message will not be cleaned up automatically. You can do that with the git_message_prettify() function.

\n", "group": "commit", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_commit_create-9" - ] - } + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_commit_create-7"] } }, "git_commit_create_v": { "type": "function", - "file": "commit.h", - "line": 357, - "lineto": 367, + "file": "git2/commit.h", + "line": 420, + "lineto": 430, "args": [ { "name": "id", "type": "git_oid *", - "comment": null + "comment": "Pointer in which to store the OID of the newly created commit" }, { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "Repository where to store the commit" }, { "name": "update_ref", "type": "const char *", - "comment": null + "comment": "If not NULL, name of the reference that\n\twill be updated to point to this commit. If the reference\n\tis not direct, it will be resolved to a direct reference.\n\tUse \"HEAD\" to update the HEAD of the current branch and\n\tmake it point to this commit. If the reference doesn't\n\texist yet, it will be created. If it does exist, the first\n\tparent must be the tip of this branch." }, { "name": "author", "type": "const git_signature *", - "comment": null + "comment": "Signature with author and author time of commit" }, { "name": "committer", "type": "const git_signature *", - "comment": null + "comment": "Signature with committer and * commit time of commit" }, { "name": "message_encoding", "type": "const char *", - "comment": null + "comment": "The encoding for the message in the\n commit, represented with a standard encoding name.\n E.g. \"UTF-8\". If NULL, no encoding header is written and\n UTF-8 is assumed." }, { "name": "message", "type": "const char *", - "comment": null + "comment": "Full message for this commit" }, { "name": "tree", "type": "const git_tree *", - "comment": null + "comment": "An instance of a `git_tree` object that will\n be used as the tree for the commit. This tree object must\n also be owned by the given `repo`." 
}, { "name": "parent_count", "type": "size_t", - "comment": null + "comment": "Number of parents for this commit" } ], "argline": "git_oid *id, git_repository *repo, const char *update_ref, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_tree *tree, size_t parent_count", "sig": "git_oid *::git_repository *::const char *::const git_signature *::const git_signature *::const char *::const char *::const git_tree *::size_t", "return": { "type": "int", - "comment": null + "comment": " 0 or an error code\n\tThe created commit will be written to the Object Database and\n\tthe given reference will be updated to point to it" }, "description": "

Create new commit in the repository using a variable argument list.

\n", "comments": "

The message will not be cleaned up automatically. You can do that with the git_message_prettify() function.

\n\n

The parents for the commit are specified as a variable list of pointers to const git_commit *. Note that this is a convenience method which may not be safe to export for certain languages or compilers

\n\n

All other parameters remain the same as git_commit_create().

\n", "group": "commit", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_commit_create_v-25" - ], - "init.c": [ - "ex/HEAD/init.html#git_commit_create_v-1" - ] + "commit.c": ["ex/v1.9.1/commit.html#git_commit_create_v-1"], + "general.c": ["ex/v1.9.1/general.html#git_commit_create_v-25"], + "init.c": ["ex/v1.9.1/init.html#git_commit_create_v-1"] } }, + "git_commit_create_from_stage": { + "type": "function", + "file": "git2/commit.h", + "line": 472, + "lineto": 476, + "args": [ + { + "name": "id", + "type": "git_oid *", + "comment": "pointer to store the new commit's object id" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "repository to commit changes in" + }, + { + "name": "message", + "type": "const char *", + "comment": "the commit message" + }, + { + "name": "opts", + "type": "const git_commit_create_options *", + "comment": "options for creating the commit" + } + ], + "argline": "git_oid *id, git_repository *repo, const char *message, const git_commit_create_options *opts", + "sig": "git_oid *::git_repository *::const char *::const git_commit_create_options *", + "return": { + "type": "int", + "comment": " 0 on success, GIT_EUNCHANGED if there were no changes to commit, or an error code" + }, + "description": "

Commits the staged changes in the repository; this is a near analog to\n git commit -m message.

\n", + "comments": "

By default, empty commits are not allowed.

\n", + "group": "commit" + }, "git_commit_amend": { "type": "function", - "file": "commit.h", - "line": 390, - "lineto": 398, + "file": "git2/commit.h", + "line": 528, + "lineto": 536, "args": [ { "name": "id", "type": "git_oid *", - "comment": null + "comment": "Pointer in which to store the OID of the newly created commit" }, { "name": "commit_to_amend", "type": "const git_commit *", - "comment": null + "comment": "The commit to amend" }, { "name": "update_ref", "type": "const char *", - "comment": null + "comment": "If not NULL, name of the reference that\n\twill be updated to point to this commit. If the reference\n\tis not direct, it will be resolved to a direct reference.\n\tUse \"HEAD\" to update the HEAD of the current branch and\n\tmake it point to this commit. If the reference doesn't\n\texist yet, it will be created. If it does exist, the first\n\tparent must be the tip of this branch." }, { "name": "author", "type": "const git_signature *", - "comment": null + "comment": "Signature with author and author time of commit" }, { "name": "committer", "type": "const git_signature *", - "comment": null + "comment": "Signature with committer and * commit time of commit" }, { "name": "message_encoding", "type": "const char *", - "comment": null + "comment": "The encoding for the message in the\n commit, represented with a standard encoding name.\n E.g. \"UTF-8\". If NULL, no encoding header is written and\n UTF-8 is assumed." }, { "name": "message", "type": "const char *", - "comment": null + "comment": "Full message for this commit" }, { "name": "tree", "type": "const git_tree *", - "comment": null + "comment": "An instance of a `git_tree` object that will\n be used as the tree for the commit. This tree object must\n also be owned by the given `repo`." 
} ], "argline": "git_oid *id, const git_commit *commit_to_amend, const char *update_ref, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_tree *tree", "sig": "git_oid *::const git_commit *::const char *::const git_signature *::const git_signature *::const char *::const char *::const git_tree *", "return": { "type": "int", - "comment": null + "comment": " 0 or an error code\n\tThe created commit will be written to the Object Database and\n\tthe given reference will be updated to point to it" }, "description": "

Amend an existing commit by replacing only non-NULL values.

\n", "comments": "

This creates a new commit that is exactly the same as the old commit, except that any non-NULL values will be updated. The new commit has the same parents as the old commit.

\n\n

The update_ref value works as in the regular git_commit_create(), updating the ref to point to the newly rewritten commit. If you want to amend a commit that is not currently the tip of the branch and then rewrite the following commits to reach a ref, pass this as NULL and update the rest of the commit chain and ref separately.

\n\n

Unlike git_commit_create(), the author, committer, message, message_encoding, and tree parameters can be NULL in which case this will use the values from the original commit_to_amend.

\n\n

All parameters have the same meanings as in git_commit_create().

\n", @@ -4153,9 +4437,9 @@ }, "git_commit_create_buffer": { "type": "function", - "file": "commit.h", - "line": 435, - "lineto": 444, + "file": "git2/commit.h", + "line": 573, + "lineto": 582, "args": [ { "name": "out", @@ -4205,19 +4489,16 @@ ], "argline": "git_buf *out, git_repository *repo, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_tree *tree, size_t parent_count, const git_commit *[] parents", "sig": "git_buf *::git_repository *::const git_signature *::const git_signature *::const char *::const char *::const git_tree *::size_t::const git_commit *[]", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a commit and write it into a buffer

\n", "comments": "

Create a commit as with git_commit_create() but instead of writing it to the objectdb, write the contents of the object into a buffer.

\n", "group": "commit" }, "git_commit_create_with_signature": { "type": "function", - "file": "commit.h", - "line": 460, - "lineto": 465, + "file": "git2/commit.h", + "line": 600, + "lineto": 605, "args": [ { "name": "out", @@ -4227,7 +4508,7 @@ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "the repository to create the commit in." }, { "name": "commit_content", @@ -4237,7 +4518,7 @@ { "name": "signature", "type": "const char *", - "comment": "the signature to add to the commit" + "comment": "the signature to add to the commit. Leave `NULL`\n to create a commit without adding a signature field." }, { "name": "signature_field", @@ -4247,19 +4528,16 @@ ], "argline": "git_oid *out, git_repository *repo, const char *commit_content, const char *signature, const char *signature_field", "sig": "git_oid *::git_repository *::const char *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a commit object from the given buffer and signature

\n", "comments": "

Given the unsigned commit object's contents, its signature and the header field in which to store the signature, attach the signature to the commit and write it into the given repository.

\n", "group": "commit" }, "git_commit_dup": { "type": "function", - "file": "commit.h", - "line": 474, - "lineto": 474, + "file": "git2/commit.h", + "line": 615, + "lineto": 615, "args": [ { "name": "out", @@ -4274,19 +4552,35 @@ ], "argline": "git_commit **out, git_commit *source", "sig": "git_commit **::git_commit *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0" }, "description": "

Create an in-memory copy of a commit. The copy must be explicitly\n free'd or it will leak.

\n", "comments": "", "group": "commit" }, + "git_commitarray_dispose": { + "type": "function", + "file": "git2/commit.h", + "line": 670, + "lineto": 670, + "args": [ + { + "name": "array", + "type": "git_commitarray *", + "comment": "The git_commitarray that contains commits to free" + } + ], + "argline": "git_commitarray *array", + "sig": "git_commitarray *", + "return": { "type": "void", "comment": null }, + "description": "

Free the commits contained in a commit array. This method should\n be called on git_commitarray objects that were provided by the\n library. Not doing so will result in a memory leak.

\n", + "comments": "

This does not free the git_commitarray itself, since the library will never allocate that object directly itself.

\n", + "group": "commitarray" + }, "git_libgit2_version": { "type": "function", - "file": "common.h", - "line": 105, - "lineto": 105, + "file": "git2/common.h", + "line": 119, + "lineto": 119, "args": [ { "name": "major", @@ -4307,18 +4601,34 @@ "argline": "int *major, int *minor, int *rev", "sig": "int *::int *::int *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success or an error code on failure" }, "description": "

Return the version of the libgit2 library\n being currently used.

\n", "comments": "", "group": "libgit2" }, + "git_libgit2_prerelease": { + "type": "function", + "file": "git2/common.h", + "line": 130, + "lineto": 130, + "args": [], + "argline": "", + "sig": "", + "return": { + "type": "const char *", + "comment": " the name of the prerelease state or NULL" + }, + "description": "

Return the prerelease state of the libgit2 library currently being\n used. For nightly builds during active development, this will be\n "alpha". Releases may have a "beta" or release candidate ("rc1",\n "rc2", etc) prerelease. For a final release, this function returns\n NULL.

\n", + "comments": "", + "group": "libgit2" + }, "git_libgit2_features": { "type": "function", - "file": "common.h", - "line": 154, - "lineto": 154, + "file": "git2/common.h", + "line": 184, + "lineto": 184, "args": [], "argline": "", "sig": "", @@ -4327,21 +4637,37 @@ "comment": " A combination of GIT_FEATURE_* values." }, "description": "

Query compile time options for libgit2.

\n", - "comments": "\n", + "comments": "", "group": "libgit2" }, - "git_libgit2_opts": { + "git_libgit2_feature_backend": { "type": "function", - "file": "common.h", - "line": 352, - "lineto": 352, - "args": [ + "file": "git2/common.h", + "line": 205, + "lineto": 206, + "args": [ { - "name": "option", - "type": "int", - "comment": "Option key" + "name": "feature", + "type": "git_feature_t", + "comment": "the feature to query details for" } ], + "argline": "git_feature_t feature", + "sig": "git_feature_t", + "return": { + "type": "const char *", + "comment": " the provider details, or NULL if the feature is not supported" + }, + "description": "

Query the backend details for the compile-time feature in libgit2.

\n", + "comments": "

This will return the "backend" for the feature, which is useful for things like HTTPS or SSH support, that can have multiple backends that could be compiled in.

\n\n

For example, when libgit2 is compiled with dynamic OpenSSL support, the feature backend will be openssl-dynamic. The feature backend names reflect the compilation options specified to the build system (though in all lower case). The backend may be "builtin" for features that are provided by libgit2 itself.

\n\n

If the feature is not supported by the library, this API returns NULL.

\n", + "group": "libgit2" + }, + "git_libgit2_opts": { + "type": "function", + "file": "git2/common.h", + "line": 569, + "lineto": 569, + "args": [{ "name": "option", "type": "int", "comment": "Option key" }], "argline": "int option", "sig": "int", "return": { @@ -4349,36 +4675,39 @@ "comment": " 0 on success, \n<\n0 on failure" }, "description": "

Set or query a library global option

\n", - "comments": "

Available options:

\n\n
* opts(GIT_OPT_GET_MWINDOW_SIZE, size_t *):\n\n    > Get the maximum mmap window size\n\n* opts(GIT_OPT_SET_MWINDOW_SIZE, size_t):\n\n    > Set the maximum mmap window size\n\n* opts(GIT_OPT_GET_MWINDOW_MAPPED_LIMIT, size_t *):\n\n    > Get the maximum memory that will be mapped in total by the library\n\n* opts(GIT_OPT_SET_MWINDOW_MAPPED_LIMIT, size_t):\n\n    >Set the maximum amount of memory that can be mapped at any time        by the library\n\n* opts(GIT_OPT_GET_SEARCH_PATH, int level, git_buf *buf)\n\n    > Get the search path for a given level of config data.  "level" must       > be one of `GIT_CONFIG_LEVEL_SYSTEM`, `GIT_CONFIG_LEVEL_GLOBAL`,       > `GIT_CONFIG_LEVEL_XDG`, or `GIT_CONFIG_LEVEL_PROGRAMDATA`.        > The search path is written to the `out` buffer.\n\n* opts(GIT_OPT_SET_SEARCH_PATH, int level, const char *path)\n\n    > Set the search path for a level of config data.  The search path      > applied to shared attributes and ignore files, too.       >       > - `path` lists directories delimited by GIT_PATH_LIST_SEPARATOR.      >   Pass NULL to reset to the default (generally based on environment       >   variables).  Use magic path `$PATH` to include the old value        >   of the path (if you want to prepend or append, for instance).       >       > - `level` must be `GIT_CONFIG_LEVEL_SYSTEM`,      >   `GIT_CONFIG_LEVEL_GLOBAL`, `GIT_CONFIG_LEVEL_XDG`, or       >   `GIT_CONFIG_LEVEL_PROGRAMDATA`.\n\n* opts(GIT_OPT_SET_CACHE_OBJECT_LIMIT, git_otype type, size_t size)\n\n    > Set the maximum data size for the given type of object to be      > considered eligible for caching in memory.  Setting to value to       > zero means that that type of object will not be cached.       > Defaults to 0 for GIT_OBJ_BLOB (i.e. 
won't cache blobs) and 4k        > for GIT_OBJ_COMMIT, GIT_OBJ_TREE, and GIT_OBJ_TAG.\n\n* opts(GIT_OPT_SET_CACHE_MAX_SIZE, ssize_t max_storage_bytes)\n\n    > Set the maximum total data size that will be cached in memory     > across all repositories before libgit2 starts evicting objects        > from the cache.  This is a soft limit, in that the library might      > briefly exceed it, but will start aggressively evicting objects       > from cache when that happens.  The default cache size is 256MB.\n\n* opts(GIT_OPT_ENABLE_CACHING, int enabled)\n\n    > Enable or disable caching completely.     >       > Because caches are repository-specific, disabling the cache       > cannot immediately clear all cached objects, but each cache will      > be cleared on the next attempt to update anything in it.\n\n* opts(GIT_OPT_GET_CACHED_MEMORY, ssize_t *current, ssize_t *allowed)\n\n    > Get the current bytes in cache and the maximum that would be      > allowed in the cache.\n\n* opts(GIT_OPT_GET_TEMPLATE_PATH, git_buf *out)\n\n    > Get the default template path.        > The path is written to the `out` buffer.\n\n* opts(GIT_OPT_SET_TEMPLATE_PATH, const char *path)\n\n    > Set the default template path.        >       > - `path` directory of template.\n\n* opts(GIT_OPT_SET_SSL_CERT_LOCATIONS, const char *file, const char *path)\n\n    > Set the SSL certificate-authority locations.      >       > - `file` is the location of a file containing several     >   certificates concatenated together.     > - `path` is the location of a directory holding several       >   certificates, one per file.     >       > Either parameter may be `NULL`, but not both.\n\n* opts(GIT_OPT_SET_USER_AGENT, const char *user_agent)\n\n    > Set the value of the User-Agent header.  This value will be       > appended to "git/1.0", for compatibility with other git clients.      
>       > - `user_agent` is the value that will be delivered as the     >   User-Agent header on HTTP requests.\n\n* opts(GIT_OPT_SET_WINDOWS_SHAREMODE, unsigned long value)\n\n    > Set the share mode used when opening files on Windows.        > For more information, see the documentation for CreateFile.       > The default is: FILE_SHARE_READ | FILE_SHARE_WRITE.  This is      > ignored and unused on non-Windows platforms.\n\n* opts(GIT_OPT_GET_WINDOWS_SHAREMODE, unsigned long *value)\n\n    > Get the share mode used when opening files on Windows.\n\n* opts(GIT_OPT_ENABLE_STRICT_OBJECT_CREATION, int enabled)\n\n    > Enable strict input validation when creating new objects      > to ensure that all inputs to the new objects are valid.  For      > example, when this is enabled, the parent(s) and tree inputs      > will be validated when creating a new commit.  This defaults      > to enabled.\n\n* opts(GIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION, int enabled)\n\n    > Validate the target of a symbolic ref when creating it.  For      > example, `foobar` is not a valid ref, therefore `foobar` is       > not a valid target for a symbolic ref by default, whereas     > `refs/heads/foobar` is.  Disabling this bypasses validation       > so that an arbitrary strings such as `foobar` can be used     > for a symbolic ref target.  This defaults to enabled.\n\n* opts(GIT_OPT_SET_SSL_CIPHERS, const char *ciphers)\n\n    > Set the SSL ciphers use for HTTPS connections.        >       > - `ciphers` is the list of ciphers that are eanbled.\n\n* opts(GIT_OPT_ENABLE_OFS_DELTA, int enabled)\n\n    > Enable or disable the use of "offset deltas" when creating packfiles,     > and the negotiation of them when talking to a remote server.      > Offset deltas store a delta base location as an offset into the       > packfile from the current location, which provides a shorter encoding     > and thus smaller resultant packfiles.     > Packfiles containing offset deltas can still be read.  
   > This defaults to enabled.\n\n* opts(GIT_OPT_ENABLE_FSYNC_GITDIR, int enabled)\n\n    > Enable synchronized writes of files in the gitdir using `fsync`       > (or the platform equivalent) to ensure that new object data       > is written to permanent storage, not simply cached.  This     > defaults to disabled.\n\n opts(GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION, int enabled)\n\n    > Enable strict verification of object hashsums when reading        > objects from disk. This may impact performance due to an      > additional checksum calculation on each object. This defaults     > to enabled.\n
\n", + "comments": "

Available options:

\n\n
* opts(GIT_OPT_GET_MWINDOW_SIZE, size_t *):\n\n    > Get the maximum mmap window size\n\n* opts(GIT_OPT_SET_MWINDOW_SIZE, size_t):\n\n    > Set the maximum mmap window size\n\n* opts(GIT_OPT_GET_MWINDOW_MAPPED_LIMIT, size_t *):\n\n    > Get the maximum memory that will be mapped in total by the library\n\n* opts(GIT_OPT_SET_MWINDOW_MAPPED_LIMIT, size_t):\n\n    > Set the maximum amount of memory that can be mapped at any time       > by the library\n\n* opts(GIT_OPT_GET_MWINDOW_FILE_LIMIT, size_t *):\n\n    > Get the maximum number of files that will be mapped at any time by the        > library\n\n* opts(GIT_OPT_SET_MWINDOW_FILE_LIMIT, size_t):\n\n    > Set the maximum number of files that can be mapped at any time        > by the library. The default (0) is unlimited.\n\n* opts(GIT_OPT_GET_SEARCH_PATH, int level, git_buf *buf)\n\n    > Get the search path for a given level of config data.  "level" must       > be one of `GIT_CONFIG_LEVEL_SYSTEM`, `GIT_CONFIG_LEVEL_GLOBAL`,       > `GIT_CONFIG_LEVEL_XDG`, or `GIT_CONFIG_LEVEL_PROGRAMDATA`.        > The search path is written to the `out` buffer.\n\n* opts(GIT_OPT_SET_SEARCH_PATH, int level, const char *path)\n\n    > Set the search path for a level of config data.  The search path      > applied to shared attributes and ignore files, too.       >       > - `path` lists directories delimited by GIT_PATH_LIST_SEPARATOR.      >   Pass NULL to reset to the default (generally based on environment       >   variables).  Use magic path `$PATH` to include the old value        >   of the path (if you want to prepend or append, for instance).       >       > - `level` must be `GIT_CONFIG_LEVEL_SYSTEM`,      >   `GIT_CONFIG_LEVEL_GLOBAL`, `GIT_CONFIG_LEVEL_XDG`, or       >   `GIT_CONFIG_LEVEL_PROGRAMDATA`.\n\n* opts(GIT_OPT_SET_CACHE_OBJECT_LIMIT, git_object_t type, size_t size)\n\n    > Set the maximum data size for the given type of object to be      > considered eligible for caching in memory.  
Setting to value to       > zero means that that type of object will not be cached.       > Defaults to 0 for GIT_OBJECT_BLOB (i.e. won't cache blobs) and 4k     > for GIT_OBJECT_COMMIT, GIT_OBJECT_TREE, and GIT_OBJECT_TAG.\n\n* opts(GIT_OPT_SET_CACHE_MAX_SIZE, ssize_t max_storage_bytes)\n\n    > Set the maximum total data size that will be cached in memory     > across all repositories before libgit2 starts evicting objects        > from the cache.  This is a soft limit, in that the library might      > briefly exceed it, but will start aggressively evicting objects       > from cache when that happens.  The default cache size is 256MB.\n\n* opts(GIT_OPT_ENABLE_CACHING, int enabled)\n\n    > Enable or disable caching completely.     >       > Because caches are repository-specific, disabling the cache       > cannot immediately clear all cached objects, but each cache will      > be cleared on the next attempt to update anything in it.\n\n* opts(GIT_OPT_GET_CACHED_MEMORY, ssize_t *current, ssize_t *allowed)\n\n    > Get the current bytes in cache and the maximum that would be      > allowed in the cache.\n\n* opts(GIT_OPT_GET_TEMPLATE_PATH, git_buf *out)\n\n    > Get the default template path.        > The path is written to the `out` buffer.\n\n* opts(GIT_OPT_SET_TEMPLATE_PATH, const char *path)\n\n    > Set the default template path.        >       > - `path` directory of template.\n\n* opts(GIT_OPT_SET_SSL_CERT_LOCATIONS, const char *file, const char *path)\n\n    > Set the SSL certificate-authority locations.      >       > - `file` is the location of a file containing several     >   certificates concatenated together.     > - `path` is the location of a directory holding several       >   certificates, one per file.     >       > Calling `GIT_OPT_ADD_SSL_X509_CERT` may override the      > data in `path`.       >       > Either parameter may be `NULL`, but not both.\n
\n\n\n\n

opts(GIT_OPT_SET_ODB_PACKED_PRIORITY, int priority) > Override the default priority of the packed ODB backend which > is added when default backends are assigned to a repository

\n\n

opts(GIT_OPT_SET_ODB_LOOSE_PRIORITY, int priority) > Override the default priority of the loose ODB backend which > is added when default backends are assigned to a repository

\n\n

opts(GIT_OPT_GET_EXTENSIONS, git_strarray *out) > Returns the list of git extensions that are supported. This > is the list of built-in extensions supported by libgit2 and > custom extensions that have been added with > GIT_OPT_SET_EXTENSIONS. Extensions that have been negated > will not be returned. The returned list should be released > with git_strarray_dispose.

\n\n

opts(GIT_OPT_SET_EXTENSIONS, const char **extensions, size_t len) > Set that the given git extensions are supported by the caller. > Extensions supported by libgit2 may be negated by prefixing > them with a !. For example: setting extensions to > { "!noop", "newext" } indicates that the caller does not want > to support repositories with the noop extension but does want > to support repositories with the newext extension.

\n\n

opts(GIT_OPT_GET_OWNER_VALIDATION, int *enabled) > Gets the owner validation setting for repository > directories.

\n\n

opts(GIT_OPT_SET_OWNER_VALIDATION, int enabled) > Set that repository directories should be owned by the current > user. The default is to validate ownership.

\n\n

opts(GIT_OPT_GET_HOMEDIR, git_buf *out) > Gets the current user's home directory, as it will be used > for file lookups. The path is written to the out buffer.

\n\n

opts(GIT_OPT_SET_HOMEDIR, const char *path) > Sets the directory used as the current user's home directory, > for file lookups. > > - path directory of home directory.

\n\n

opts(GIT_OPT_GET_SERVER_CONNECT_TIMEOUT, int *timeout) > Gets the timeout (in milliseconds) to attempt connections to > a remote server.

\n\n

opts(GIT_OPT_SET_SERVER_CONNECT_TIMEOUT, int timeout) > Sets the timeout (in milliseconds) to attempt connections to > a remote server. Set to 0 to use the system default. Note that > this may not be able to be configured longer than the system > default, typically 75 seconds.

\n\n

opts(GIT_OPT_GET_SERVER_TIMEOUT, int *timeout) > Gets the timeout (in milliseconds) for reading from and writing > to a remote server.

\n\n

opts(GIT_OPT_SET_SERVER_TIMEOUT, int timeout) > Sets the timeout (in milliseconds) for reading from and writing > to a remote server. Set to 0 to use the system default.

\n", "group": "libgit2" }, "git_config_entry_free": { "type": "function", - "file": "config.h", - "line": 75, - "lineto": 75, + "file": "git2/config.h", + "line": 131, + "lineto": 131, "args": [ { - "name": "", + "name": "entry", "type": "git_config_entry *", - "comment": null + "comment": "The entry to free." } ], - "argline": "git_config_entry *", + "argline": "git_config_entry *entry", "sig": "git_config_entry *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free a config entry

\n", + "return": { "type": "void", "comment": null }, + "description": "

Free a config entry.

\n", "comments": "", - "group": "config" + "group": "config", + "examples": { + "config.c": [ + "ex/v1.9.1/config.html#git_config_entry_free-1", + "ex/v1.9.1/config.html#git_config_entry_free-2" + ] + } }, "git_config_find_global": { "type": "function", - "file": "config.h", - "line": 116, - "lineto": 116, + "file": "git2/config.h", + "line": 183, + "lineto": 183, "args": [ { "name": "out", @@ -4393,14 +4722,14 @@ "comment": " 0 if a global configuration file has been found. Its path will be stored in `out`." }, "description": "

Locate the path to the global configuration file

\n", - "comments": "

The user or global configuration file is usually located in $HOME/.gitconfig.

\n\n

This method will try to guess the full path to that file, if the file exists. The returned path may be used on any git_config call to load the global configuration file.

\n\n

This method will not guess the path to the xdg compatible config file (.config/git/config).

\n", + "comments": "

The user or global configuration file is usually located in $HOME/.gitconfig.

\n\n

This method will try to guess the full path to that file, if the file exists. The returned path may be used on any git_config call to load the global configuration file.

\n\n

This method will not guess the path to the xdg compatible config file (.config/git/config).

\n", "group": "config" }, "git_config_find_xdg": { "type": "function", - "file": "config.h", - "line": 133, - "lineto": 133, + "file": "git2/config.h", + "line": 200, + "lineto": 200, "args": [ { "name": "out", @@ -4420,9 +4749,9 @@ }, "git_config_find_system": { "type": "function", - "file": "config.h", - "line": 145, - "lineto": 145, + "file": "git2/config.h", + "line": 212, + "lineto": 212, "args": [ { "name": "out", @@ -4437,14 +4766,14 @@ "comment": " 0 if a system configuration file has been\n\tfound. Its path will be stored in `out`." }, "description": "

Locate the path to the system configuration file

\n", - "comments": "

If /etc/gitconfig doesn't exist, it will look for %PROGRAMFILES%.

\n", + "comments": "

If /etc/gitconfig doesn't exist, it will look for %PROGRAMFILES%.

\n", "group": "config" }, "git_config_find_programdata": { "type": "function", - "file": "config.h", - "line": 156, - "lineto": 156, + "file": "git2/config.h", + "line": 223, + "lineto": 223, "args": [ { "name": "out", @@ -4459,14 +4788,14 @@ "comment": " 0 if a ProgramData configuration file has been\n\tfound. Its path will be stored in `out`." }, "description": "

Locate the path to the configuration file in ProgramData

\n", - "comments": "

Look for the file in %PROGRAMDATA% used by portable git.

\n", + "comments": "

Look for the file in %PROGRAMDATA% used by portable git.

\n", "group": "config" }, "git_config_open_default": { "type": "function", - "file": "config.h", - "line": 168, - "lineto": 168, + "file": "git2/config.h", + "line": 235, + "lineto": 235, "args": [ { "name": "out", @@ -4476,19 +4805,16 @@ ], "argline": "git_config **out", "sig": "git_config **", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Open the global, XDG and system configuration files

\n", "comments": "

Utility wrapper that finds the global, XDG and system configuration files and opens them into a single prioritized config object that can be used when accessing default config data outside a repository.

\n", "group": "config" }, "git_config_new": { "type": "function", - "file": "config.h", - "line": 179, - "lineto": 179, + "file": "git2/config.h", + "line": 246, + "lineto": 246, "args": [ { "name": "out", @@ -4498,19 +4824,16 @@ ], "argline": "git_config **out", "sig": "git_config **", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Allocate a new configuration object

\n", "comments": "

This object is empty, so you have to add a file to it before you can do anything with it.

\n", "group": "config" }, "git_config_add_file_ondisk": { "type": "function", - "file": "config.h", - "line": 208, - "lineto": 213, + "file": "git2/config.h", + "line": 275, + "lineto": 280, "args": [ { "name": "cfg", @@ -4550,9 +4873,9 @@ }, "git_config_open_ondisk": { "type": "function", - "file": "config.h", - "line": 227, - "lineto": 227, + "file": "git2/config.h", + "line": 294, + "lineto": 294, "args": [ { "name": "out", @@ -4567,24 +4890,19 @@ ], "argline": "git_config **out, const char *path", "sig": "git_config **::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Create a new config instance containing a single on-disk file

\n", "comments": "

This method is a simple utility wrapper for the following sequence of calls: - git_config_new - git_config_add_file_ondisk

\n", "group": "config", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_config_open_ondisk-26" - ] + "general.c": ["ex/v1.9.1/general.html#git_config_open_ondisk-26"] } }, "git_config_open_level": { "type": "function", - "file": "config.h", - "line": 245, - "lineto": 248, + "file": "git2/config.h", + "line": 312, + "lineto": 315, "args": [ { "name": "out", @@ -4614,9 +4932,9 @@ }, "git_config_open_global": { "type": "function", - "file": "config.h", - "line": 262, - "lineto": 262, + "file": "git2/config.h", + "line": 330, + "lineto": 330, "args": [ { "name": "out", @@ -4631,19 +4949,45 @@ ], "argline": "git_config **out, git_config *config", "sig": "git_config **::git_config *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Open the global/XDG configuration file according to git's rules

\n", - "comments": "

Git allows you to store your global configuration at $HOME/.config or $XDG_CONFIG_HOME/git/config. For backwards compatability, the XDG file shouldn't be used unless the use has created it explicitly. With this function you'll open the correct one to write to.

\n", + "comments": "

Git allows you to store your global configuration at $HOME/.gitconfig or $XDG_CONFIG_HOME/git/config. For backwards compatibility, the XDG file shouldn't be used unless the user has created it explicitly. With this function you'll open the correct one to write to.

\n", + "group": "config" + }, + "git_config_set_writeorder": { + "type": "function", + "file": "git2/config.h", + "line": 343, + "lineto": 346, + "args": [ + { + "name": "cfg", + "type": "git_config *", + "comment": "the configuration to change write order of" + }, + { + "name": "levels", + "type": "git_config_level_t *", + "comment": "the ordering of levels for writing" + }, + { + "name": "len", + "type": "size_t", + "comment": "the length of the levels array" + } + ], + "argline": "git_config *cfg, git_config_level_t *levels, size_t len", + "sig": "git_config *::git_config_level_t *::size_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Set the write order for configuration backends. By default, the\n write ordering does not match the read ordering; for example, the\n worktree configuration is a high-priority for reading, but is not\n written to unless explicitly chosen.

\n", + "comments": "", "group": "config" }, "git_config_snapshot": { "type": "function", - "file": "config.h", - "line": 278, - "lineto": 278, + "file": "git2/config.h", + "line": 362, + "lineto": 362, "args": [ { "name": "out", @@ -4658,19 +5002,16 @@ ], "argline": "git_config **out, git_config *config", "sig": "git_config **::git_config *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a snapshot of the configuration

\n", "comments": "

Create a snapshot of the current state of a configuration, which allows you to look into a consistent view of the configuration for looking up complex values (e.g. a remote, submodule).

\n\n

The string returned when querying such a config object is valid until it is freed.

\n", "group": "config" }, "git_config_free": { "type": "function", - "file": "config.h", - "line": 285, - "lineto": 285, + "file": "git2/config.h", + "line": 369, + "lineto": 369, "args": [ { "name": "cfg", @@ -4680,25 +5021,23 @@ ], "argline": "git_config *cfg", "sig": "git_config *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the configuration and its associated memory and files

\n", "comments": "", "group": "config", "examples": { + "config.c": ["ex/v1.9.1/config.html#git_config_free-3"], "general.c": [ - "ex/HEAD/general.html#git_config_free-27", - "ex/HEAD/general.html#git_config_free-28" + "ex/v1.9.1/general.html#git_config_free-27", + "ex/v1.9.1/general.html#git_config_free-28" ] } }, "git_config_get_entry": { "type": "function", - "file": "config.h", - "line": 297, - "lineto": 300, + "file": "git2/config.h", + "line": 381, + "lineto": 384, "args": [ { "name": "out", @@ -4718,19 +5057,19 @@ ], "argline": "git_config_entry **out, const git_config *cfg, const char *name", "sig": "git_config_entry **::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the git_config_entry of a config variable.

\n", "comments": "

Free the git_config_entry after use with git_config_entry_free().

\n", - "group": "config" + "group": "config", + "examples": { + "config.c": ["ex/v1.9.1/config.html#git_config_get_entry-4"] + } }, "git_config_get_int32": { "type": "function", - "file": "config.h", - "line": 314, - "lineto": 314, + "file": "git2/config.h", + "line": 398, + "lineto": 398, "args": [ { "name": "out", @@ -4750,25 +5089,22 @@ ], "argline": "int32_t *out, const git_config *cfg, const char *name", "sig": "int32_t *::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of an integer config variable.

\n", "comments": "

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config", "examples": { "general.c": [ - "ex/HEAD/general.html#git_config_get_int32-29", - "ex/HEAD/general.html#git_config_get_int32-30" + "ex/v1.9.1/general.html#git_config_get_int32-29", + "ex/v1.9.1/general.html#git_config_get_int32-30" ] } }, "git_config_get_int64": { "type": "function", - "file": "config.h", - "line": 328, - "lineto": 328, + "file": "git2/config.h", + "line": 412, + "lineto": 412, "args": [ { "name": "out", @@ -4788,19 +5124,16 @@ ], "argline": "int64_t *out, const git_config *cfg, const char *name", "sig": "int64_t *::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of a long integer config variable.

\n", "comments": "

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config" }, "git_config_get_bool": { "type": "function", - "file": "config.h", - "line": 345, - "lineto": 345, + "file": "git2/config.h", + "line": 429, + "lineto": 429, "args": [ { "name": "out", @@ -4820,19 +5153,16 @@ ], "argline": "int *out, const git_config *cfg, const char *name", "sig": "int *::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of a boolean config variable.

\n", "comments": "

This function uses the usual C convention of 0 being false and anything else true.

\n\n

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config" }, "git_config_get_path": { "type": "function", - "file": "config.h", - "line": 363, - "lineto": 363, + "file": "git2/config.h", + "line": 447, + "lineto": 447, "args": [ { "name": "out", @@ -4852,19 +5182,16 @@ ], "argline": "git_buf *out, const git_config *cfg, const char *name", "sig": "git_buf *::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of a path config variable.

\n", "comments": "

A leading '~' will be expanded to the global search path (which defaults to the user's home directory but can be overridden via git_libgit2_opts().

\n\n

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config" }, "git_config_get_string": { "type": "function", - "file": "config.h", - "line": 381, - "lineto": 381, + "file": "git2/config.h", + "line": 465, + "lineto": 465, "args": [ { "name": "out", @@ -4884,25 +5211,22 @@ ], "argline": "const char **out, const git_config *cfg, const char *name", "sig": "const char **::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of a string config variable.

\n", "comments": "

This function can only be used on snapshot config objects. The string is owned by the config and should not be freed by the user. The pointer will be valid until the config is freed.

\n\n

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config", "examples": { "general.c": [ - "ex/HEAD/general.html#git_config_get_string-31", - "ex/HEAD/general.html#git_config_get_string-32" + "ex/v1.9.1/general.html#git_config_get_string-31", + "ex/v1.9.1/general.html#git_config_get_string-32" ] } }, "git_config_get_string_buf": { "type": "function", - "file": "config.h", - "line": 397, - "lineto": 397, + "file": "git2/config.h", + "line": 481, + "lineto": 481, "args": [ { "name": "out", @@ -4922,19 +5246,16 @@ ], "argline": "git_buf *out, const git_config *cfg, const char *name", "sig": "git_buf *::const git_config *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the value of a string config variable.

\n", "comments": "

The value of the config will be copied into the buffer.

\n\n

All config files will be looked into, in the order of their defined level. A higher level means a higher priority. The first occurrence of the variable will be returned here.

\n", "group": "config" }, "git_config_get_multivar_foreach": { "type": "function", - "file": "config.h", - "line": 415, - "lineto": 415, + "file": "git2/config.h", + "line": 500, + "lineto": 500, "args": [ { "name": "cfg", @@ -4964,19 +5285,16 @@ ], "argline": "const git_config *cfg, const char *name, const char *regexp, git_config_foreach_cb callback, void *payload", "sig": "const git_config *::const char *::const char *::git_config_foreach_cb::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Get each value of a multivar in a foreach callback

\n", "comments": "

The callback will be called on each variable found

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n", "group": "config" }, "git_config_multivar_iterator_new": { "type": "function", - "file": "config.h", - "line": 430, - "lineto": 430, + "file": "git2/config.h", + "line": 516, + "lineto": 516, "args": [ { "name": "out", @@ -5001,19 +5319,16 @@ ], "argline": "git_config_iterator **out, const git_config *cfg, const char *name, const char *regexp", "sig": "git_config_iterator **::const git_config *::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Get each value of a multivar

\n", "comments": "

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n", "group": "config" }, "git_config_next": { "type": "function", - "file": "config.h", - "line": 442, - "lineto": 442, + "file": "git2/config.h", + "line": 528, + "lineto": 528, "args": [ { "name": "entry", @@ -5033,14 +5348,14 @@ "comment": " 0 or an error code. GIT_ITEROVER if the iteration has completed" }, "description": "

Return the current entry and advance the iterator

\n", - "comments": "

The pointers returned by this function are valid until the iterator is freed.

\n", + "comments": "

The pointers returned by this function are valid until the next call to git_config_next or until the iterator is freed.

\n", "group": "config" }, "git_config_iterator_free": { "type": "function", - "file": "config.h", - "line": 449, - "lineto": 449, + "file": "git2/config.h", + "line": 535, + "lineto": 535, "args": [ { "name": "iter", @@ -5050,19 +5365,16 @@ ], "argline": "git_config_iterator *iter", "sig": "git_config_iterator *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a config iterator

\n", "comments": "", "group": "config" }, "git_config_set_int32": { "type": "function", - "file": "config.h", - "line": 460, - "lineto": 460, + "file": "git2/config.h", + "line": 546, + "lineto": 546, "args": [ { "name": "cfg", @@ -5082,19 +5394,16 @@ ], "argline": "git_config *cfg, const char *name, int32_t value", "sig": "git_config *::const char *::int32_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Set the value of an integer config variable in the config file\n with the highest level (usually the local one).

\n", "comments": "", "group": "config" }, "git_config_set_int64": { "type": "function", - "file": "config.h", - "line": 471, - "lineto": 471, + "file": "git2/config.h", + "line": 557, + "lineto": 557, "args": [ { "name": "cfg", @@ -5114,19 +5423,16 @@ ], "argline": "git_config *cfg, const char *name, int64_t value", "sig": "git_config *::const char *::int64_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Set the value of a long integer config variable in the config file\n with the highest level (usually the local one).

\n", "comments": "", "group": "config" }, "git_config_set_bool": { "type": "function", - "file": "config.h", - "line": 482, - "lineto": 482, + "file": "git2/config.h", + "line": 568, + "lineto": 568, "args": [ { "name": "cfg", @@ -5138,27 +5444,20 @@ "type": "const char *", "comment": "the variable's name" }, - { - "name": "value", - "type": "int", - "comment": "the value to store" - } + { "name": "value", "type": "int", "comment": "the value to store" } ], "argline": "git_config *cfg, const char *name, int value", "sig": "git_config *::const char *::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Set the value of a boolean config variable in the config file\n with the highest level (usually the local one).

\n", "comments": "", "group": "config" }, "git_config_set_string": { "type": "function", - "file": "config.h", - "line": 496, - "lineto": 496, + "file": "git2/config.h", + "line": 582, + "lineto": 582, "args": [ { "name": "cfg", @@ -5178,19 +5477,19 @@ ], "argline": "git_config *cfg, const char *name, const char *value", "sig": "git_config *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Set the value of a string config variable in the config file\n with the highest level (usually the local one).

\n", "comments": "

A copy of the string is made and the user is free to use it afterwards.

\n", - "group": "config" + "group": "config", + "examples": { + "config.c": ["ex/v1.9.1/config.html#git_config_set_string-5"] + } }, "git_config_set_multivar": { "type": "function", - "file": "config.h", - "line": 508, - "lineto": 508, + "file": "git2/config.h", + "line": 595, + "lineto": 595, "args": [ { "name": "cfg", @@ -5207,27 +5506,20 @@ "type": "const char *", "comment": "a regular expression to indicate which values to replace" }, - { - "name": "value", - "type": "const char *", - "comment": "the new value." - } + { "name": "value", "type": "const char *", "comment": "the new value." } ], "argline": "git_config *cfg, const char *name, const char *regexp, const char *value", "sig": "git_config *::const char *::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Set a multivar in the local config file.

\n", "comments": "

The regular expression is applied case-sensitively on the value.

\n", "group": "config" }, "git_config_delete_entry": { "type": "function", - "file": "config.h", - "line": 517, - "lineto": 517, + "file": "git2/config.h", + "line": 605, + "lineto": 605, "args": [ { "name": "cfg", @@ -5242,19 +5534,16 @@ ], "argline": "git_config *cfg, const char *name", "sig": "git_config *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Delete a config variable from the config file\n with the highest level (usually the local one).

\n", "comments": "", "group": "config" }, "git_config_delete_multivar": { "type": "function", - "file": "config.h", - "line": 530, - "lineto": 530, + "file": "git2/config.h", + "line": 618, + "lineto": 618, "args": [ { "name": "cfg", @@ -5274,19 +5563,16 @@ ], "argline": "git_config *cfg, const char *name, const char *regexp", "sig": "git_config *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Deletes one or several entries from a multivar in the local config file.

\n", "comments": "

The regular expression is applied case-sensitively on the value.

\n", "group": "config" }, "git_config_foreach": { "type": "function", - "file": "config.h", - "line": 548, - "lineto": 551, + "file": "git2/config.h", + "line": 636, + "lineto": 639, "args": [ { "name": "cfg", @@ -5316,9 +5602,9 @@ }, "git_config_iterator_new": { "type": "function", - "file": "config.h", - "line": 562, - "lineto": 562, + "file": "git2/config.h", + "line": 651, + "lineto": 651, "args": [ { "name": "out", @@ -5328,24 +5614,21 @@ { "name": "cfg", "type": "const git_config *", - "comment": "where to ge the variables from" + "comment": "where to get the variables from" } ], "argline": "git_config_iterator **out, const git_config *cfg", "sig": "git_config_iterator **::const git_config *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Iterate over all the config variables

\n", "comments": "

Use git_config_next to advance the iteration and git_config_iterator_free when done.

\n", "group": "config" }, "git_config_iterator_glob_new": { "type": "function", - "file": "config.h", - "line": 578, - "lineto": 578, + "file": "git2/config.h", + "line": 668, + "lineto": 668, "args": [ { "name": "out", @@ -5365,19 +5648,16 @@ ], "argline": "git_config_iterator **out, const git_config *cfg, const char *regexp", "sig": "git_config_iterator **::const git_config *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Iterate over all the config variables whose name matches a pattern

\n", "comments": "

Use git_config_next to advance the iteration and git_config_iterator_free when done.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n", "group": "config" }, "git_config_foreach_match": { "type": "function", - "file": "config.h", - "line": 600, - "lineto": 604, + "file": "git2/config.h", + "line": 690, + "lineto": 694, "args": [ { "name": "cfg", @@ -5407,14 +5687,14 @@ "comment": " 0 or the return value of the callback which didn't return 0" }, "description": "

Perform an operation on each config variable matching a regular expression.

\n", - "comments": "

This behaviors like git_config_foreach with an additional filter of a regular expression that filters which config keys are passed to the callback.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the case-insensitive parts are lower-case.

\n", + "comments": "

This behaves like git_config_foreach with an additional filter of a regular expression that filters which config keys are passed to the callback.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the case-insensitive parts are lower-case.

\n", "group": "config" }, "git_config_get_mapped": { "type": "function", - "file": "config.h", - "line": 640, - "lineto": 645, + "file": "git2/config.h", + "line": 730, + "lineto": 735, "args": [ { "name": "out", @@ -5433,8 +5713,8 @@ }, { "name": "maps", - "type": "const git_cvar_map *", - "comment": "array of `git_cvar_map` objects specifying the possible mappings" + "type": "const git_configmap *", + "comment": "array of `git_configmap` objects specifying the possible mappings" }, { "name": "map_n", @@ -5442,21 +5722,21 @@ "comment": "number of mapping objects in `maps`" } ], - "argline": "int *out, const git_config *cfg, const char *name, const git_cvar_map *maps, size_t map_n", - "sig": "int *::const git_config *::const char *::const git_cvar_map *::size_t", + "argline": "int *out, const git_config *cfg, const char *name, const git_configmap *maps, size_t map_n", + "sig": "int *::const git_config *::const char *::const git_configmap *::size_t", "return": { "type": "int", "comment": " 0 on success, error code otherwise" }, "description": "

Query the value of a config variable and return it mapped to\n an integer constant.

\n", - "comments": "

This is a helper method to easily map different possible values to a variable to integer constants that easily identify them.

\n\n

A mapping array looks as follows:

\n\n
git_cvar_map autocrlf_mapping[] = {     {GIT_CVAR_FALSE, NULL, GIT_AUTO_CRLF_FALSE},        {GIT_CVAR_TRUE, NULL, GIT_AUTO_CRLF_TRUE},      {GIT_CVAR_STRING, "input", GIT_AUTO_CRLF_INPUT},        {GIT_CVAR_STRING, "default", GIT_AUTO_CRLF_DEFAULT}};\n
\n\n

On any "false" value for the variable (e.g. "false", "FALSE", "no"), the mapping will store GIT_AUTO_CRLF_FALSE in the out parameter.

\n\n

The same thing applies for any "true" value such as "true", "yes" or "1", storing the GIT_AUTO_CRLF_TRUE variable.

\n\n

Otherwise, if the value matches the string "input" (with case insensitive comparison), the given constant will be stored in out, and likewise for "default".

\n\n

If not a single match can be made to store in out, an error code will be returned.

\n", + "comments": "

This is a helper method to easily map different possible values to a variable to integer constants that easily identify them.

\n\n

A mapping array looks as follows:

\n\n
git_configmap autocrlf_mapping[] = {        {GIT_CVAR_FALSE, NULL, GIT_AUTO_CRLF_FALSE},        {GIT_CVAR_TRUE, NULL, GIT_AUTO_CRLF_TRUE},      {GIT_CVAR_STRING, "input", GIT_AUTO_CRLF_INPUT},        {GIT_CVAR_STRING, "default", GIT_AUTO_CRLF_DEFAULT}};\n
\n\n

On any "false" value for the variable (e.g. "false", "FALSE", "no"), the mapping will store GIT_AUTO_CRLF_FALSE in the out parameter.

\n\n

The same thing applies for any "true" value such as "true", "yes" or "1", storing the GIT_AUTO_CRLF_TRUE variable.

\n\n

Otherwise, if the value matches the string "input" (with case insensitive comparison), the given constant will be stored in out, and likewise for "default".

\n\n

If not a single match can be made to store in out, an error code will be returned.

\n", "group": "config" }, "git_config_lookup_map_value": { "type": "function", - "file": "config.h", - "line": 655, - "lineto": 659, + "file": "git2/config.h", + "line": 746, + "lineto": 750, "args": [ { "name": "out", @@ -5465,116 +5745,88 @@ }, { "name": "maps", - "type": "const git_cvar_map *", - "comment": "array of `git_cvar_map` objects specifying the possible mappings" + "type": "const git_configmap *", + "comment": "array of `git_configmap` objects specifying the possible mappings" }, { "name": "map_n", "type": "size_t", "comment": "number of mapping objects in `maps`" }, - { - "name": "value", - "type": "const char *", - "comment": "value to parse" - } + { "name": "value", "type": "const char *", "comment": "value to parse" } ], - "argline": "int *out, const git_cvar_map *maps, size_t map_n, const char *value", - "sig": "int *::const git_cvar_map *::size_t::const char *", - "return": { - "type": "int", - "comment": null - }, + "argline": "int *out, const git_configmap *maps, size_t map_n, const char *value", + "sig": "int *::const git_configmap *::size_t::const char *", + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Maps a string value to an integer constant

\n", "comments": "", "group": "config" }, "git_config_parse_bool": { "type": "function", - "file": "config.h", - "line": 671, - "lineto": 671, + "file": "git2/config.h", + "line": 763, + "lineto": 763, "args": [ { "name": "out", "type": "int *", "comment": "place to store the result of the parsing" }, - { - "name": "value", - "type": "const char *", - "comment": "value to parse" - } + { "name": "value", "type": "const char *", "comment": "value to parse" } ], "argline": "int *out, const char *value", "sig": "int *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Parse a string value as a bool.

\n", "comments": "

Valid values for true are: 'true', 'yes', 'on', 1 or any number different from 0 Valid values for false are: 'false', 'no', 'off', 0

\n", "group": "config" }, "git_config_parse_int32": { "type": "function", - "file": "config.h", - "line": 683, - "lineto": 683, + "file": "git2/config.h", + "line": 776, + "lineto": 776, "args": [ { "name": "out", "type": "int32_t *", "comment": "place to store the result of the parsing" }, - { - "name": "value", - "type": "const char *", - "comment": "value to parse" - } + { "name": "value", "type": "const char *", "comment": "value to parse" } ], "argline": "int32_t *out, const char *value", "sig": "int32_t *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Parse a string value as an int32.

\n", "comments": "

An optional value suffix of 'k', 'm', or 'g' will cause the value to be multiplied by 1024, 1048576, or 1073741824 prior to output.

\n", "group": "config" }, "git_config_parse_int64": { "type": "function", - "file": "config.h", - "line": 695, - "lineto": 695, + "file": "git2/config.h", + "line": 789, + "lineto": 789, "args": [ { "name": "out", "type": "int64_t *", "comment": "place to store the result of the parsing" }, - { - "name": "value", - "type": "const char *", - "comment": "value to parse" - } + { "name": "value", "type": "const char *", "comment": "value to parse" } ], "argline": "int64_t *out, const char *value", "sig": "int64_t *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Parse a string value as an int64.

\n", "comments": "

An optional value suffix of 'k', 'm', or 'g' will cause the value to be multiplied by 1024, 1048576, or 1073741824 prior to output.

\n", "group": "config" }, "git_config_parse_path": { "type": "function", - "file": "config.h", - "line": 710, - "lineto": 710, + "file": "git2/config.h", + "line": 805, + "lineto": 805, "args": [ { "name": "out", @@ -5589,19 +5841,16 @@ ], "argline": "git_buf *out, const char *value", "sig": "git_buf *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Parse a string value as a path.

\n", "comments": "

A leading '~' will be expanded to the global search path (which defaults to the user's home directory but can be overridden via git_libgit2_opts().

\n\n

If the value does not begin with a tilde, the input will be returned.

\n", "group": "config" }, "git_config_backend_foreach_match": { "type": "function", - "file": "config.h", - "line": 728, - "lineto": 732, + "file": "git2/config.h", + "line": 824, + "lineto": 828, "args": [ { "name": "backend", @@ -5626,19 +5875,16 @@ ], "argline": "git_config_backend *backend, const char *regexp, git_config_foreach_cb callback, void *payload", "sig": "git_config_backend *::const char *::git_config_foreach_cb::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Perform an operation on each config variable in given config backend\n matching a regular expression.

\n", - "comments": "

This behaviors like git_config_foreach_match except instead of all config entries it just enumerates through the given backend entry.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Perform an operation on each config variable in a given config backend,\n matching a regular expression.

\n", + "comments": "

This behaves like git_config_foreach_match except that only config entries from the given backend entry are enumerated.

\n\n

The regular expression is applied case-sensitively on the normalized form of the variable name: the section and variable parts are lower-cased. The subsection is left unchanged.

\n", "group": "config" }, "git_config_lock": { "type": "function", - "file": "config.h", - "line": 751, - "lineto": 751, + "file": "git2/config.h", + "line": 847, + "lineto": 847, "args": [ { "name": "tx", @@ -5653,2808 +5899,2955 @@ ], "argline": "git_transaction **tx, git_config *cfg", "sig": "git_transaction **::git_config *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lock the backend with the highest priority

\n", "comments": "

Locking disallows anybody else from writing to that backend. Any updates made after locking will not be visible to a reader until the file is unlocked.

\n\n

You can apply the changes by calling git_transaction_commit() before freeing the transaction. Either of these actions will unlock the config.

\n", "group": "config" }, - "git_cred_userpass": { + "git_credential_free": { "type": "function", - "file": "cred_helpers.h", - "line": 43, - "lineto": 48, + "file": "git2/credential.h", + "line": 149, + "lineto": 149, + "args": [ + { + "name": "cred", + "type": "git_credential *", + "comment": "the object to free" + } + ], + "argline": "git_credential *cred", + "sig": "git_credential *", + "return": { "type": "void", "comment": null }, + "description": "

Free a credential.

\n", + "comments": "

This is only necessary if you own the object; that is, if you are a transport.

\n", + "group": "credential" + }, + "git_credential_has_username": { + "type": "function", + "file": "git2/credential.h", + "line": 157, + "lineto": 157, + "args": [ + { + "name": "cred", + "type": "git_credential *", + "comment": "object to check" + } + ], + "argline": "git_credential *cred", + "sig": "git_credential *", + "return": { + "type": "int", + "comment": " 1 if the credential object has non-NULL username, 0 otherwise" + }, + "description": "

Check whether a credential object contains username information.

\n", + "comments": "", + "group": "credential" + }, + "git_credential_get_username": { + "type": "function", + "file": "git2/credential.h", + "line": 165, + "lineto": 165, "args": [ { "name": "cred", - "type": "git_cred **", + "type": "git_credential *", + "comment": "object to check" + } + ], + "argline": "git_credential *cred", + "sig": "git_credential *", + "return": { + "type": "const char *", + "comment": " the credential username, or NULL if not applicable" + }, + "description": "

Return the username associated with a credential object.

\n", + "comments": "", + "group": "credential" + }, + "git_credential_userpass_plaintext_new": { + "type": "function", + "file": "git2/credential.h", + "line": 176, + "lineto": 179, + "args": [ + { + "name": "out", + "type": "git_credential **", "comment": "The newly created credential object." }, { - "name": "url", + "name": "username", "type": "const char *", - "comment": "The resource for which we are demanding a credential." + "comment": "The username of the credential." }, { - "name": "user_from_url", + "name": "password", "type": "const char *", - "comment": "The username that was embedded in a \"user\n@\nhost\"\n remote url, or NULL if not included." - }, - { - "name": "allowed_types", - "type": "unsigned int", - "comment": "A bitmask stating which cred types are OK to return." - }, - { - "name": "payload", - "type": "void *", - "comment": "The payload provided when specifying this callback. (This is\n interpreted as a `git_cred_userpass_payload*`.)" + "comment": "The password of the credential." } ], - "argline": "git_cred **cred, const char *url, const char *user_from_url, unsigned int allowed_types, void *payload", - "sig": "git_cred **::const char *::const char *::unsigned int::void *", + "argline": "git_credential **out, const char *username, const char *password", + "sig": "git_credential **::const char *::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 for success or an error code for failure" }, - "description": "

Stock callback usable as a git_cred_acquire_cb. This calls\n git_cred_userpass_plaintext_new unless the protocol has not specified\n GIT_CREDTYPE_USERPASS_PLAINTEXT as an allowed type.

\n", + "description": "

Create a new plain-text username and password credential object.\n The supplied credential parameter will be internally duplicated.

\n", "comments": "", - "group": "cred" + "group": "credential" }, - "git_describe_commit": { + "git_credential_default_new": { "type": "function", - "file": "describe.h", - "line": 123, - "lineto": 126, + "file": "git2/credential.h", + "line": 188, + "lineto": 188, "args": [ { - "name": "result", - "type": "git_describe_result **", - "comment": "pointer to store the result. You must free this once\n you're done with it." - }, - { - "name": "committish", - "type": "git_object *", - "comment": "a committish to describe" - }, - { - "name": "opts", - "type": "git_describe_options *", - "comment": "the lookup options" + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." } ], - "argline": "git_describe_result **result, git_object *committish, git_describe_options *opts", - "sig": "git_describe_result **::git_object *::git_describe_options *", + "argline": "git_credential **out", + "sig": "git_credential **", "return": { "type": "int", - "comment": null + "comment": " 0 for success or an error code for failure" }, - "description": "

Describe a commit

\n", - "comments": "

Perform the describe operation on the given committish object.

\n", - "group": "describe", - "examples": { - "describe.c": [ - "ex/HEAD/describe.html#git_describe_commit-1" - ] - } + "description": "

Create a "default" credential usable for Negotiate mechanisms like NTLM\n or Kerberos authentication.

\n", + "comments": "", + "group": "credential" }, - "git_describe_workdir": { + "git_credential_username_new": { "type": "function", - "file": "describe.h", - "line": 140, - "lineto": 143, + "file": "git2/credential.h", + "line": 200, + "lineto": 200, "args": [ { "name": "out", - "type": "git_describe_result **", - "comment": "pointer to store the result. You must free this once\n you're done with it." - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository in which to perform the describe" + "type": "git_credential **", + "comment": "The newly created credential object." }, { - "name": "opts", - "type": "git_describe_options *", - "comment": "the lookup options" + "name": "username", + "type": "const char *", + "comment": "The username to authenticate with" } ], - "argline": "git_describe_result **out, git_repository *repo, git_describe_options *opts", - "sig": "git_describe_result **::git_repository *::git_describe_options *", + "argline": "git_credential **out, const char *username", + "sig": "git_credential **::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 for success or an error code for failure" }, - "description": "

Describe a commit

\n", - "comments": "

Perform the describe operation on the current commit and the worktree. After peforming describe on HEAD, a status is run and the description is considered to be dirty if there are.

\n", - "group": "describe", - "examples": { - "describe.c": [ - "ex/HEAD/describe.html#git_describe_workdir-2" - ] - } + "description": "

Create a credential to specify a username.

\n", + "comments": "

This is used with ssh authentication to query for the username if none is specified in the url.

\n", + "group": "credential" }, - "git_describe_format": { + "git_credential_ssh_key_new": { "type": "function", - "file": "describe.h", - "line": 153, - "lineto": 156, + "file": "git2/credential.h", + "line": 213, + "lineto": 218, "args": [ { "name": "out", - "type": "git_buf *", - "comment": "The buffer to store the result" + "type": "git_credential **", + "comment": "The newly created credential object." }, { - "name": "result", - "type": "const git_describe_result *", - "comment": "the result from `git_describe_commit()` or\n `git_describe_workdir()`." + "name": "username", + "type": "const char *", + "comment": "username to use to authenticate" }, { - "name": "opts", - "type": "const git_describe_format_options *", - "comment": "the formatting options" + "name": "publickey", + "type": "const char *", + "comment": "The path to the public key of the credential." + }, + { + "name": "privatekey", + "type": "const char *", + "comment": "The path to the private key of the credential." + }, + { + "name": "passphrase", + "type": "const char *", + "comment": "The passphrase of the credential." } ], - "argline": "git_buf *out, const git_describe_result *result, const git_describe_format_options *opts", - "sig": "git_buf *::const git_describe_result *::const git_describe_format_options *", + "argline": "git_credential **out, const char *username, const char *publickey, const char *privatekey, const char *passphrase", + "sig": "git_credential **::const char *::const char *::const char *::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 for success or an error code for failure" }, - "description": "

Print the describe result to a buffer

\n", + "description": "

Create a new passphrase-protected ssh key credential object.\n The supplied credential parameter will be internally duplicated.

\n", "comments": "", - "group": "describe", - "examples": { - "describe.c": [ - "ex/HEAD/describe.html#git_describe_format-3" - ] - } + "group": "credential" }, - "git_describe_result_free": { + "git_credential_ssh_key_memory_new": { "type": "function", - "file": "describe.h", - "line": 161, - "lineto": 161, + "file": "git2/credential.h", + "line": 230, + "lineto": 235, "args": [ { - "name": "result", - "type": "git_describe_result *", - "comment": null + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." + }, + { + "name": "username", + "type": "const char *", + "comment": "username to use to authenticate." + }, + { + "name": "publickey", + "type": "const char *", + "comment": "The public key of the credential." + }, + { + "name": "privatekey", + "type": "const char *", + "comment": "The private key of the credential." + }, + { + "name": "passphrase", + "type": "const char *", + "comment": "The passphrase of the credential." } ], - "argline": "git_describe_result *result", - "sig": "git_describe_result *", + "argline": "git_credential **out, const char *username, const char *publickey, const char *privatekey, const char *passphrase", + "sig": "git_credential **::const char *::const char *::const char *::const char *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 for success or an error code for failure" }, - "description": "

Free the describe result.

\n", + "description": "

Create a new ssh key credential object reading the keys from memory.

\n", "comments": "", - "group": "describe" + "group": "credential" }, - "git_diff_init_options": { + "git_credential_ssh_interactive_new": { "type": "function", - "file": "diff.h", - "line": 447, - "lineto": 449, + "file": "git2/credential.h", + "line": 278, + "lineto": 282, "args": [ { - "name": "opts", - "type": "git_diff_options *", - "comment": "The `git_diff_options` struct to initialize" + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." }, { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_DIFF_OPTIONS_VERSION`" + "name": "username", + "type": "const char *", + "comment": "Username to use to authenticate." + }, + { + "name": "prompt_callback", + "type": "git_credential_ssh_interactive_cb", + "comment": "The callback method used for prompts." + }, + { + "name": "payload", + "type": "void *", + "comment": "Additional data to pass to the callback." } ], - "argline": "git_diff_options *opts, unsigned int version", - "sig": "git_diff_options *::unsigned int", + "argline": "git_credential **out, const char *username, git_credential_ssh_interactive_cb prompt_callback, void *payload", + "sig": "git_credential **::const char *::git_credential_ssh_interactive_cb::void *", "return": { "type": "int", - "comment": " Zero on success; -1 on failure." + "comment": " 0 for success or an error code for failure." }, - "description": "

Initializes a git_diff_options with default values. Equivalent to\n creating an instance with GIT_DIFF_OPTIONS_INIT.

\n", + "description": "

Create a new ssh keyboard-interactive based credential object.\n The supplied credential parameter will be internally duplicated.

\n", "comments": "", - "group": "diff" + "group": "credential" }, - "git_diff_find_init_options": { + "git_credential_ssh_key_from_agent": { "type": "function", - "file": "diff.h", - "line": 742, - "lineto": 744, + "file": "git2/credential.h", + "line": 292, + "lineto": 294, "args": [ { - "name": "opts", - "type": "git_diff_find_options *", - "comment": "The `git_diff_find_options` struct to initialize" + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." }, { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_DIFF_FIND_OPTIONS_VERSION`" + "name": "username", + "type": "const char *", + "comment": "username to use to authenticate" } ], - "argline": "git_diff_find_options *opts, unsigned int version", - "sig": "git_diff_find_options *::unsigned int", + "argline": "git_credential **out, const char *username", + "sig": "git_credential **::const char *", "return": { "type": "int", - "comment": " Zero on success; -1 on failure." + "comment": " 0 for success or an error code for failure" }, - "description": "

Initializes a git_diff_find_options with default values. Equivalent to\n creating an instance with GIT_DIFF_FIND_OPTIONS_INIT.

\n", + "description": "

Create a new ssh key credential object used for querying an ssh-agent.\n The supplied credential parameter will be internally duplicated.

\n", "comments": "", - "group": "diff" + "group": "credential" }, - "git_diff_free": { + "git_credential_ssh_custom_new": { "type": "function", - "file": "diff.h", - "line": 758, - "lineto": 758, + "file": "git2/credential.h", + "line": 332, + "lineto": 338, "args": [ { - "name": "diff", - "type": "git_diff *", - "comment": "The previously created diff; cannot be used after free." - } - ], - "argline": "git_diff *diff", - "sig": "git_diff *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Deallocate a diff.

\n", - "comments": "", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_free-2" - ], - "log.c": [ - "ex/HEAD/log.html#git_diff_free-25", - "ex/HEAD/log.html#git_diff_free-26" - ] - } - }, - "git_diff_tree_to_tree": { - "type": "function", - "file": "diff.h", - "line": 776, - "lineto": 781, - "args": [ + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." + }, { - "name": "diff", - "type": "git_diff **", - "comment": "Output pointer to a git_diff pointer to be allocated." + "name": "username", + "type": "const char *", + "comment": "username to use to authenticate" }, { - "name": "repo", - "type": "git_repository *", - "comment": "The repository containing the trees." + "name": "publickey", + "type": "const char *", + "comment": "The bytes of the public key." }, { - "name": "old_tree", - "type": "git_tree *", - "comment": "A git_tree object to diff from, or NULL for empty tree." + "name": "publickey_len", + "type": "size_t", + "comment": "The length of the public key in bytes." }, { - "name": "new_tree", - "type": "git_tree *", - "comment": "A git_tree object to diff to, or NULL for empty tree." + "name": "sign_callback", + "type": "git_credential_sign_cb", + "comment": "The callback method to sign the data during the challenge." }, { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "payload", + "type": "void *", + "comment": "Additional data to pass to the callback." 
} ], - "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, git_tree *new_tree, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_tree *::git_tree *::const git_diff_options *", + "argline": "git_credential **out, const char *username, const char *publickey, size_t publickey_len, git_credential_sign_cb sign_callback, void *payload", + "sig": "git_credential **::const char *::const char *::size_t::git_credential_sign_cb::void *", "return": { "type": "int", - "comment": null + "comment": " 0 for success or an error code for failure" }, - "description": "

Create a diff with the difference between two tree objects.

\n", - "comments": "

This is equivalent to git diff <old-tree> <new-tree>

\n\n

The first tree will be used for the "old_file" side of the delta and the second tree will be used for the "new_file" side of the delta. You can pass NULL to indicate an empty tree, although it is an error to pass NULL for both the old_tree and new_tree.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_tree_to_tree-3" - ], - "log.c": [ - "ex/HEAD/log.html#git_diff_tree_to_tree-27", - "ex/HEAD/log.html#git_diff_tree_to_tree-28" - ] - } + "description": "

Create an ssh key credential with a custom signing function.

\n", + "comments": "

This lets you use your own function to sign the challenge.

\n\n

This function and its credential type is provided for completeness and wraps libssh2_userauth_publickey(), which is undocumented.

\n\n

The supplied credential parameter will be internally duplicated.

\n", + "group": "credential" }, - "git_diff_tree_to_index": { + "git_credential_userpass": { "type": "function", - "file": "diff.h", - "line": 802, - "lineto": 807, + "file": "git2/credential_helpers.h", + "line": 44, + "lineto": 49, "args": [ { - "name": "diff", - "type": "git_diff **", - "comment": "Output pointer to a git_diff pointer to be allocated." + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." }, { - "name": "repo", - "type": "git_repository *", - "comment": "The repository containing the tree and index." + "name": "url", + "type": "const char *", + "comment": "The resource for which we are demanding a credential." }, { - "name": "old_tree", - "type": "git_tree *", - "comment": "A git_tree object to diff from, or NULL for empty tree." + "name": "user_from_url", + "type": "const char *", + "comment": "The username that was embedded in a \"user\n@\nhost\"\n remote url, or NULL if not included." }, { - "name": "index", - "type": "git_index *", - "comment": "The index to diff with; repo index used if NULL." + "name": "allowed_types", + "type": "unsigned int", + "comment": "A bitmask stating which credential types are OK to return." }, { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "payload", + "type": "void *", + "comment": "The payload provided when specifying this callback. (This is\n interpreted as a `git_credential_userpass_payload*`.)" } ], - "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, git_index *index, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_tree *::git_index *::const git_diff_options *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Create a diff between a tree and repository index.

\n", - "comments": "

This is equivalent to git diff --cached <treeish> or if you pass the HEAD tree, then like git diff --cached.

\n\n

The tree you pass will be used for the "old_file" side of the delta, and the index will be used for the "new_file" side of the delta.

\n\n

If you pass NULL for the index, then the existing index of the repo will be used. In this case, the index will be refreshed from disk (if it has changed) before the diff is generated.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_tree_to_index-4" - ] - } + "argline": "git_credential **out, const char *url, const char *user_from_url, unsigned int allowed_types, void *payload", + "sig": "git_credential **::const char *::const char *::unsigned int::void *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Stock callback usable as a git_credential_acquire_cb. This calls\n git_cred_userpass_plaintext_new unless the protocol has not specified\n GIT_CREDENTIAL_USERPASS_PLAINTEXT as an allowed type.

\n", + "comments": "", + "group": "credential" }, - "git_diff_index_to_workdir": { + "git_blob_filtered_content": { "type": "function", - "file": "diff.h", - "line": 829, - "lineto": 833, + "file": "git2/deprecated.h", + "line": 124, + "lineto": 128, + "args": [ + { "name": "out", "type": "git_buf *", "comment": null }, + { "name": "blob", "type": "git_blob *", "comment": null }, + { "name": "as_path", "type": "const char *", "comment": null }, + { "name": "check_for_binary_data", "type": "int", "comment": null } + ], + "argline": "git_buf *out, git_blob *blob, const char *as_path, int check_for_binary_data", + "sig": "git_buf *::git_blob *::const char *::int", + "return": { "type": "int", "comment": null }, + "description": "

Deprecated in favor of git_blob_filter.

\n", + "comments": "", + "group": "blob" + }, + "git_filter_list_stream_data": { + "type": "function", + "file": "git2/deprecated.h", + "line": 148, + "lineto": 151, + "args": [ + { "name": "filters", "type": "git_filter_list *", "comment": null }, + { "name": "data", "type": "git_buf *", "comment": null }, + { "name": "target", "type": "git_writestream *", "comment": null } + ], + "argline": "git_filter_list *filters, git_buf *data, git_writestream *target", + "sig": "git_filter_list *::git_buf *::git_writestream *", + "return": { "type": "int", "comment": null }, + "description": "

Deprecated in favor of git_filter_list_stream_buffer.

\n", + "comments": "", + "group": "filter" + }, + "git_filter_list_apply_to_data": { + "type": "function", + "file": "git2/deprecated.h", + "line": 158, + "lineto": 161, + "args": [ + { "name": "out", "type": "git_buf *", "comment": null }, + { "name": "filters", "type": "git_filter_list *", "comment": null }, + { "name": "in", "type": "git_buf *", "comment": null } + ], + "argline": "git_buf *out, git_filter_list *filters, git_buf *in", + "sig": "git_buf *::git_filter_list *::git_buf *", + "return": { "type": "int", "comment": null }, + "description": "

Deprecated in favor of git_filter_list_apply_to_buffer.

\n", + "comments": "", + "group": "filter" + }, + "git_treebuilder_write_with_buffer": { + "type": "function", + "file": "git2/deprecated.h", + "line": 187, + "lineto": 188, + "args": [ + { "name": "oid", "type": "git_oid *", "comment": null }, + { "name": "bld", "type": "git_treebuilder *", "comment": null }, + { "name": "tree", "type": "git_buf *", "comment": null } + ], + "argline": "git_oid *oid, git_treebuilder *bld, git_buf *tree", + "sig": "git_oid *::git_treebuilder *::git_buf *", + "return": { "type": "int", "comment": null }, + "description": "

Write the contents of the tree builder as a tree object.\n This is an alias of git_treebuilder_write and is preserved\n for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "treebuilder" + }, + "git_buf_grow": { + "type": "function", + "file": "git2/deprecated.h", + "line": 229, + "lineto": 229, "args": [ { - "name": "diff", - "type": "git_diff **", - "comment": "Output pointer to a git_diff pointer to be allocated." - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository." - }, - { - "name": "index", - "type": "git_index *", - "comment": "The index to diff from; repo index used if NULL." + "name": "buffer", + "type": "git_buf *", + "comment": "The buffer to be resized; may or may not be allocated yet" }, { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "target_size", + "type": "size_t", + "comment": "The desired available size" } ], - "argline": "git_diff **diff, git_repository *repo, git_index *index, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_index *::const git_diff_options *", + "argline": "git_buf *buffer, size_t target_size", + "sig": "git_buf *::size_t", "return": { "type": "int", - "comment": null + "comment": " 0 on success, -1 on allocation failure" }, - "description": "

Create a diff between the repository index and the workdir directory.

\n", - "comments": "

This matches the git diff command. See the note below on git_diff_tree_to_workdir for a discussion of the difference between git diff and git diff HEAD and how to emulate a git diff <treeish> using libgit2.

\n\n

The index will be used for the "old_file" side of the delta, and the working directory will be used for the "new_file" side of the delta.

\n\n

If you pass NULL for the index, then the existing index of the repo will be used. In this case, the index will be refreshed from disk (if it has changed) before the diff is generated.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_index_to_workdir-5" - ] - } + "description": "

Resize the buffer allocation to make more space.

\n", + "comments": "

This will attempt to grow the buffer to accommodate the target size.

\n\n

If the buffer refers to memory that was not allocated by libgit2 (i.e. the asize field is zero), then ptr will be replaced with a newly allocated block of data. Be careful so that memory allocated by the caller is not lost. As a special variant, if you pass target_size as 0 and the memory is not allocated by libgit2, this will allocate a new buffer of size size and copy the external data into it.

\n\n

Currently, this will never shrink a buffer, only expand it.

\n\n

If the allocation fails, this will return an error and the buffer will be marked as invalid for future operations, invalidating the contents.

\n", + "group": "buf" }, - "git_diff_tree_to_workdir": { + "git_buf_set": { "type": "function", - "file": "diff.h", - "line": 858, - "lineto": 862, + "file": "git2/deprecated.h", + "line": 239, + "lineto": 240, "args": [ { - "name": "diff", - "type": "git_diff **", - "comment": "A pointer to a git_diff pointer that will be allocated." - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository containing the tree." + "name": "buffer", + "type": "git_buf *", + "comment": "The buffer to set" }, { - "name": "old_tree", - "type": "git_tree *", - "comment": "A git_tree object to diff from, or NULL for empty tree." + "name": "data", + "type": "const void *", + "comment": "The data to copy into the buffer" }, { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "datalen", + "type": "size_t", + "comment": "The length of the data to copy into the buffer" } ], - "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_tree *::const git_diff_options *", + "argline": "git_buf *buffer, const void *data, size_t datalen", + "sig": "git_buf *::const void *::size_t", "return": { "type": "int", - "comment": null + "comment": " 0 on success, -1 on allocation failure" }, - "description": "

Create a diff between a tree and the working directory.

\n", - "comments": "

The tree you provide will be used for the "old_file" side of the delta, and the working directory will be used for the "new_file" side.

\n\n

This is not the same as git diff <treeish> or git diff-index <treeish>. Those commands use information from the index, whereas this function strictly returns the differences between the tree and the files in the working directory, regardless of the state of the index. Use git_diff_tree_to_workdir_with_index to emulate those commands.

\n\n

To see difference between this and git_diff_tree_to_workdir_with_index, consider the example of a staged file deletion where the file has then been put back into the working dir and further modified. The tree-to-workdir diff for that file is 'modified', but git diff would show status 'deleted' since there is a staged delete.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_tree_to_workdir-6" - ] - } + "description": "

Set buffer to a copy of some raw data.

\n", + "comments": "", + "group": "buf" }, - "git_diff_tree_to_workdir_with_index": { + "git_buf_is_binary": { "type": "function", - "file": "diff.h", - "line": 877, - "lineto": 881, + "file": "git2/deprecated.h", + "line": 248, + "lineto": 248, "args": [ { - "name": "diff", - "type": "git_diff **", - "comment": "A pointer to a git_diff pointer that will be allocated." - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository containing the tree." - }, - { - "name": "old_tree", - "type": "git_tree *", - "comment": "A git_tree object to diff from, or NULL for empty tree." - }, - { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "buf", + "type": "const git_buf *", + "comment": "Buffer to check" } ], - "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_tree *::const git_diff_options *", + "argline": "const git_buf *buf", + "sig": "const git_buf *", "return": { "type": "int", - "comment": null + "comment": " 1 if buffer looks like non-text data" }, - "description": "

Create a diff between a tree and the working directory using index data\n to account for staged deletes, tracked files, etc.

\n", - "comments": "

This emulates git diff <tree> by diffing the tree to the index and the index to the working directory and blending the results into a single diff that includes staged deleted, etc.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_tree_to_workdir_with_index-7" - ] - } + "description": "

Check quickly if buffer looks like it contains binary data

\n", + "comments": "", + "group": "buf" }, - "git_diff_index_to_index": { + "git_buf_contains_nul": { "type": "function", - "file": "diff.h", - "line": 895, - "lineto": 900, + "file": "git2/deprecated.h", + "line": 256, + "lineto": 256, "args": [ { - "name": "diff", - "type": "git_diff **", - "comment": "Output pointer to a git_diff pointer to be allocated." - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository containing the indexes." - }, - { - "name": "old_index", - "type": "git_index *", - "comment": "A git_index object to diff from." - }, - { - "name": "new_index", - "type": "git_index *", - "comment": "A git_index object to diff to." - }, - { - "name": "opts", - "type": "const git_diff_options *", - "comment": "Structure with options to influence diff or NULL for defaults." + "name": "buf", + "type": "const git_buf *", + "comment": "Buffer to check" } ], - "argline": "git_diff **diff, git_repository *repo, git_index *old_index, git_index *new_index, const git_diff_options *opts", - "sig": "git_diff **::git_repository *::git_index *::git_index *::const git_diff_options *", + "argline": "const git_buf *buf", + "sig": "const git_buf *", "return": { "type": "int", - "comment": null + "comment": " 1 if buffer contains a NUL byte" }, - "description": "

Create a diff with the difference between two index objects.

\n", - "comments": "

The first index will be used for the "old_file" side of the delta and the second index will be used for the "new_file" side of the delta.

\n", - "group": "diff" + "description": "

Check quickly if buffer contains a NUL byte

\n", + "comments": "", + "group": "buf" }, - "git_diff_merge": { + "git_buf_free": { + "type": "function", + "file": "git2/deprecated.h", + "line": 268, + "lineto": 268, + "args": [{ "name": "buffer", "type": "git_buf *", "comment": null }], + "argline": "git_buf *buffer", + "sig": "git_buf *", + "return": { "type": "void", "comment": null }, + "description": "

Free the memory referred to by the git_buf. This is an alias of\n git_buf_dispose and is preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "buf" + }, + "git_diff_format_email": { "type": "function", - "file": "diff.h", - "line": 915, - "lineto": 917, + "file": "git2/deprecated.h", + "line": 374, + "lineto": 377, "args": [ + { "name": "out", "type": "git_buf *", "comment": null }, + { "name": "diff", "type": "git_diff *", "comment": null }, { - "name": "onto", - "type": "git_diff *", - "comment": "Diff to merge into." - }, + "name": "opts", + "type": "const git_diff_format_email_options *", + "comment": null + } + ], + "argline": "git_buf *out, git_diff *diff, const git_diff_format_email_options *opts", + "sig": "git_buf *::git_diff *::const git_diff_format_email_options *", + "return": { "type": "int", "comment": null }, + "description": "

Create an e-mail ready patch from a diff.

\n", + "comments": "", + "group": "diff" + }, + "git_diff_commit_as_email": { + "type": "function", + "file": "git2/deprecated.h", + "line": 385, + "lineto": 392, + "args": [ + { "name": "out", "type": "git_buf *", "comment": null }, + { "name": "repo", "type": "git_repository *", "comment": null }, + { "name": "commit", "type": "git_commit *", "comment": null }, + { "name": "patch_no", "type": "size_t", "comment": null }, + { "name": "total_patches", "type": "size_t", "comment": null }, + { "name": "flags", "type": "uint32_t", "comment": null }, { - "name": "from", - "type": "const git_diff *", - "comment": "Diff to merge." + "name": "diff_opts", + "type": "const git_diff_options *", + "comment": null } ], - "argline": "git_diff *onto, const git_diff *from", - "sig": "git_diff *::const git_diff *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Merge one diff into another.

\n", - "comments": "

This merges items from the "from" list into the "onto" list. The resulting diff will have all items that appear in either list. If an item appears in both lists, then it will be "merged" to appear as if the old version was from the "onto" list and the new version is from the "from" list (with the exception that if the item has a pending DELETE in the middle, then it will show as deleted).

\n", + "argline": "git_buf *out, git_repository *repo, git_commit *commit, size_t patch_no, size_t total_patches, uint32_t flags, const git_diff_options *diff_opts", + "sig": "git_buf *::git_repository *::git_commit *::size_t::size_t::uint32_t::const git_diff_options *", + "return": { "type": "int", "comment": null }, + "description": "

Create an e-mail ready patch for a commit.

\n", + "comments": "", "group": "diff" }, - "git_diff_find_similar": { + "git_diff_format_email_options_init": { "type": "function", - "file": "diff.h", - "line": 931, - "lineto": 933, + "file": "git2/deprecated.h", + "line": 404, + "lineto": 406, "args": [ { - "name": "diff", - "type": "git_diff *", - "comment": "diff to run detection algorithms on" + "name": "opts", + "type": "git_diff_format_email_options *", + "comment": "The `git_blame_options` struct to initialize." }, { - "name": "options", - "type": "const git_diff_find_options *", - "comment": "Control how detection should be run, NULL for defaults" + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DIFF_FORMAT_EMAIL_OPTIONS_VERSION`." } ], - "argline": "git_diff *diff, const git_diff_find_options *options", - "sig": "git_diff *::const git_diff_find_options *", + "argline": "git_diff_format_email_options *opts, unsigned int version", + "sig": "git_diff_format_email_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success, -1 on failure" + "comment": " Zero on success; -1 on failure." }, - "description": "

Transform a diff marking file renames, copies, etc.

\n", - "comments": "

This modifies a diff in place, replacing old entries that look like renames or copies with new entries reflecting those changes. This also will, if requested, break modified files into add/remove pairs if the amount of change is above a threshold.

\n", - "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_find_similar-8" - ] - } + "description": "

Initialize git_diff_format_email_options structure

\n", + "comments": "

Initializes a git_diff_format_email_options with default values. Equivalent to creating an instance with GIT_DIFF_FORMAT_EMAIL_OPTIONS_INIT.

\n", + "group": "diff" }, - "git_diff_num_deltas": { + "giterr_last": { "type": "function", - "file": "diff.h", - "line": 951, - "lineto": 951, + "file": "git2/deprecated.h", + "line": 503, + "lineto": 503, + "args": [], + "argline": "", + "sig": "", + "return": { "type": "const git_error *", "comment": null }, + "description": "

Return the last git_error object that was generated for the\n current thread. This is an alias of git_error_last and is\n preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "giterr" + }, + "giterr_clear": { + "type": "function", + "file": "git2/deprecated.h", + "line": 515, + "lineto": 515, + "args": [], + "argline": "", + "sig": "", + "return": { "type": "void", "comment": null }, + "description": "

Clear the last error. This is an alias of git_error_clear and is\n preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "giterr" + }, + "giterr_set_str": { + "type": "function", + "file": "git2/deprecated.h", + "line": 527, + "lineto": 527, "args": [ - { - "name": "diff", - "type": "const git_diff *", - "comment": "A git_diff generated by one of the above functions" - } + { "name": "error_class", "type": "int", "comment": null }, + { "name": "string", "type": "const char *", "comment": null } ], - "argline": "const git_diff *diff", - "sig": "const git_diff *", - "return": { - "type": "size_t", - "comment": " Count of number of deltas in the list" - }, - "description": "

Query how many diff records are there in a diff.

\n", - "comments": "", - "group": "diff", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_diff_num_deltas-29" - ] - } + "argline": "int error_class, const char *string", + "sig": "int::const char *", + "return": { "type": "void", "comment": null }, + "description": "

Sets the error message to the given string. This is an alias of\n git_error_set_str and is preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "giterr" }, - "git_diff_num_deltas_of_type": { + "giterr_set_oom": { + "type": "function", + "file": "git2/deprecated.h", + "line": 539, + "lineto": 539, + "args": [], + "argline": "", + "sig": "", + "return": { "type": "void", "comment": null }, + "description": "

Indicates that an out-of-memory situation occurred. This is an alias\n of git_error_set_oom and is preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "giterr" + }, + "git_object__size": { "type": "function", - "file": "diff.h", - "line": 964, - "lineto": 965, + "file": "git2/deprecated.h", + "line": 666, + "lineto": 666, "args": [ - { - "name": "diff", - "type": "const git_diff *", - "comment": "A git_diff generated by one of the above functions" - }, { "name": "type", - "type": "git_delta_t", - "comment": "A git_delta_t value to filter the count" + "type": "git_object_t", + "comment": "object type to get its size" } ], - "argline": "const git_diff *diff, git_delta_t type", - "sig": "const git_diff *::git_delta_t", - "return": { - "type": "size_t", - "comment": " Count of number of deltas matching delta_t type" - }, - "description": "

Query how many diff deltas are there in a diff filtered by type.

\n", - "comments": "

This works just like git_diff_entrycount() with an extra parameter that is a git_delta_t and returns just the count of how many deltas match that particular type.

\n", - "group": "diff" + "argline": "git_object_t type", + "sig": "git_object_t", + "return": { "type": "size_t", "comment": " size in bytes of the object" }, + "description": "

Get the size in bytes for the structure which\n acts as an in-memory representation of any given\n object type.

\n", + "comments": "

For all the core types, this would the equivalent of calling sizeof(git_commit) if the core types were not opaque on the external API.

\n", + "group": "object" }, - "git_diff_get_delta": { + "git_remote_is_valid_name": { "type": "function", - "file": "diff.h", - "line": 984, - "lineto": 985, + "file": "git2/deprecated.h", + "line": 687, + "lineto": 687, "args": [ { - "name": "diff", - "type": "const git_diff *", - "comment": "Diff list object" - }, - { - "name": "idx", - "type": "size_t", - "comment": "Index into diff list" + "name": "remote_name", + "type": "const char *", + "comment": "name to be checked." } ], - "argline": "const git_diff *diff, size_t idx", - "sig": "const git_diff *::size_t", + "argline": "const char *remote_name", + "sig": "const char *", "return": { - "type": "const git_diff_delta *", - "comment": " Pointer to git_diff_delta (or NULL if `idx` out of range)" + "type": "int", + "comment": " 1 if the reference name is acceptable; 0 if it isn't" }, - "description": "

Return the diff delta for an entry in the diff list.

\n", - "comments": "

The git_diff_delta pointer points to internal data and you do not have to release it when you are done with it. It will go away when the * git_diff (or any associated git_patch) goes away.

\n\n

Note that the flags on the delta related to whether it has binary content or not may not be set if there are no attributes set for the file and there has been no reason to load the file data at this point. For now, if you need those flags to be up to date, your only option is to either use git_diff_foreach or create a git_patch.

\n", - "group": "diff" + "description": "

Ensure the remote name is well-formed.

\n", + "comments": "", + "group": "remote" }, - "git_diff_is_sorted_icase": { + "git_reference_is_valid_name": { "type": "function", - "file": "diff.h", - "line": 993, - "lineto": 993, + "file": "git2/deprecated.h", + "line": 741, + "lineto": 741, "args": [ { - "name": "diff", - "type": "const git_diff *", - "comment": "diff to check" + "name": "refname", + "type": "const char *", + "comment": "name to be checked." } ], - "argline": "const git_diff *diff", - "sig": "const git_diff *", + "argline": "const char *refname", + "sig": "const char *", "return": { "type": "int", - "comment": " 0 if case sensitive, 1 if case is ignored" + "comment": " 1 if the reference name is acceptable; 0 if it isn't" }, - "description": "

Check if deltas are sorted case sensitively or insensitively.

\n", - "comments": "", - "group": "diff" + "description": "

Ensure the reference name is well-formed.

\n", + "comments": "

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD").
  2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
\n", + "group": "reference" }, - "git_diff_foreach": { + "git_oidarray_free": { + "type": "function", + "file": "git2/deprecated.h", + "line": 922, + "lineto": 922, + "args": [{ "name": "array", "type": "git_oidarray *", "comment": null }], + "argline": "git_oidarray *array", + "sig": "git_oidarray *", + "return": { "type": "void", "comment": null }, + "description": "

Free the memory referred to by the git_oidarray. This is an alias of\n git_oidarray_dispose and is preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "oidarray" + }, + "git_strarray_copy": { "type": "function", - "file": "diff.h", - "line": 1021, - "lineto": 1027, + "file": "git2/deprecated.h", + "line": 991, + "lineto": 991, "args": [ - { - "name": "diff", - "type": "git_diff *", - "comment": "A git_diff generated by one of the above functions." - }, - { - "name": "file_cb", - "type": "git_diff_file_cb", - "comment": "Callback function to make per file in the diff." - }, - { - "name": "binary_cb", - "type": "git_diff_binary_cb", - "comment": "Optional callback to make for binary files." - }, - { - "name": "hunk_cb", - "type": "git_diff_hunk_cb", - "comment": "Optional callback to make per hunk of text diff. This\n callback is called to describe a range of lines in the\n diff. It will not be issued for binary files." - }, - { - "name": "line_cb", - "type": "git_diff_line_cb", - "comment": "Optional callback to make per line of diff text. This\n same callback will be made for context lines, added, and\n removed lines, and even for a deleted trailing newline." - }, - { - "name": "payload", - "type": "void *", - "comment": "Reference pointer that will be passed to your callbacks." - } + { "name": "tgt", "type": "git_strarray *", "comment": "target" }, + { "name": "src", "type": "const git_strarray *", "comment": "source" } ], - "argline": "git_diff *diff, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", - "sig": "git_diff *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", + "argline": "git_strarray *tgt, const git_strarray *src", + "sig": "git_strarray *::const git_strarray *", "return": { "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" + "comment": " 0 on success, \n<\n 0 on allocation failure" }, - "description": "

Loop over all deltas in a diff issuing callbacks.

\n", - "comments": "

This will iterate through all of the files described in a diff. You should provide a file callback to learn about each file.

\n\n

The "hunk" and "line" callbacks are optional, and the text diff of the files will only be calculated if they are not NULL. Of course, these callbacks will not be invoked for binary files on the diff or for files whose only changed is a file mode change.

\n\n

Returning a non-zero value from any of the callbacks will terminate the iteration and return the value to the user.

\n", - "group": "diff" + "description": "

Copy a string array object from source to target.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "strarray" }, - "git_diff_status_char": { + "git_strarray_free": { "type": "function", - "file": "diff.h", - "line": 1040, - "lineto": 1040, + "file": "git2/deprecated.h", + "line": 1003, + "lineto": 1003, + "args": [{ "name": "array", "type": "git_strarray *", "comment": null }], + "argline": "git_strarray *array", + "sig": "git_strarray *", + "return": { "type": "void", "comment": null }, + "description": "

Free the memory referred to by the git_strarray. This is an alias of\n git_strarray_dispose and is preserved for backward compatibility.

\n", + "comments": "

This function is deprecated, but there is no plan to remove this function at this time.

\n", + "group": "strarray" + }, + "git_blame_init_options": { + "type": "function", + "file": "git2/deprecated.h", + "line": 1035, + "lineto": 1035, + "args": [ + { "name": "opts", "type": "git_blame_options *", "comment": null }, + { "name": "version", "type": "unsigned int", "comment": null } + ], + "argline": "git_blame_options *opts, unsigned int version", + "sig": "git_blame_options *::unsigned int", + "return": { "type": "int", "comment": null }, + "description": "", + "comments": "

These functions are retained for backward compatibility. The newer versions of these functions should be preferred in all new code.

\n\n

There is no plan to remove these backward compatibility functions at this time.

\n\n

@{

\n", + "group": "blame" + }, + "git_describe_options_init": { + "type": "function", + "file": "git2/describe.h", + "line": 91, + "lineto": 91, "args": [ { - "name": "status", - "type": "git_delta_t", - "comment": "The git_delta_t value to look up" + "name": "opts", + "type": "git_describe_options *", + "comment": "The `git_describe_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DESCRIBE_OPTIONS_VERSION`." } ], - "argline": "git_delta_t status", - "sig": "git_delta_t", + "argline": "git_describe_options *opts, unsigned int version", + "sig": "git_describe_options *::unsigned int", "return": { - "type": "char", - "comment": " The single character label for that code" + "type": "int", + "comment": " Zero on success; -1 on failure." }, - "description": "

Look up the single character abbreviation for a delta status code.

\n", - "comments": "

When you run git diff --name-status it uses single letter codes in the output such as 'A' for added, 'D' for deleted, 'M' for modified, etc. This function converts a git_delta_t value into these letters for your own purposes. GIT_DELTA_UNTRACKED will return a space (i.e. ' ').

\n", - "group": "diff" + "description": "

Initialize git_describe_options structure

\n", + "comments": "

Initializes a git_describe_options with default values. Equivalent to creating an instance with GIT_DESCRIBE_OPTIONS_INIT.

\n", + "group": "describe", + "examples": { + "describe.c": ["ex/v1.9.1/describe.html#git_describe_options_init-1"] + } }, - "git_diff_print": { + "git_describe_format_options_init": { "type": "function", - "file": "diff.h", - "line": 1065, - "lineto": 1069, + "file": "git2/describe.h", + "line": 141, + "lineto": 141, "args": [ { - "name": "diff", - "type": "git_diff *", - "comment": "A git_diff generated by one of the above functions." - }, - { - "name": "format", - "type": "git_diff_format_t", - "comment": "A git_diff_format_t value to pick the text format." - }, - { - "name": "print_cb", - "type": "git_diff_line_cb", - "comment": "Callback to make per line of diff text." + "name": "opts", + "type": "git_describe_format_options *", + "comment": "The `git_describe_format_options` struct to initialize." }, { - "name": "payload", - "type": "void *", - "comment": "Reference pointer that will be passed to your callback." + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DESCRIBE_FORMAT_OPTIONS_VERSION`." } ], - "argline": "git_diff *diff, git_diff_format_t format, git_diff_line_cb print_cb, void *payload", - "sig": "git_diff *::git_diff_format_t::git_diff_line_cb::void *", + "argline": "git_describe_format_options *opts, unsigned int version", + "sig": "git_describe_format_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" + "comment": " Zero on success; -1 on failure." }, - "description": "

Iterate over a diff generating formatted text output.

\n", - "comments": "

Returning a non-zero value from the callbacks will terminate the iteration and return the non-zero value to the caller.

\n", - "group": "diff", + "description": "

Initialize git_describe_format_options structure

\n", + "comments": "

Initializes a git_describe_format_options with default values. Equivalent to creating an instance with GIT_DESCRIBE_FORMAT_OPTIONS_INIT.

\n", + "group": "describe", "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_print-9" - ], - "log.c": [ - "ex/HEAD/log.html#git_diff_print-30" + "describe.c": [ + "ex/v1.9.1/describe.html#git_describe_format_options_init-2" ] } }, - "git_diff_to_buf": { + "git_describe_commit": { "type": "function", - "file": "diff.h", - "line": 1081, - "lineto": 1084, + "file": "git2/describe.h", + "line": 159, + "lineto": 162, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "A pointer to a user-allocated git_buf that will\n contain the diff text" + "name": "result", + "type": "git_describe_result **", + "comment": "pointer to store the result. You must free this once\n you're done with it." }, { - "name": "diff", - "type": "git_diff *", - "comment": "A git_diff generated by one of the above functions." + "name": "committish", + "type": "git_object *", + "comment": "a committish to describe" }, { - "name": "format", - "type": "git_diff_format_t", - "comment": "A git_diff_format_t value to pick the text format." + "name": "opts", + "type": "git_describe_options *", + "comment": "the lookup options (or NULL for defaults)" } ], - "argline": "git_buf *out, git_diff *diff, git_diff_format_t format", - "sig": "git_buf *::git_diff *::git_diff_format_t", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Produce the complete formatted text output from a diff into a\n buffer.

\n", - "comments": "", - "group": "diff" + "argline": "git_describe_result **result, git_object *committish, git_describe_options *opts", + "sig": "git_describe_result **::git_object *::git_describe_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Describe a commit

\n", + "comments": "

Perform the describe operation on the given committish object.

\n", + "group": "describe", + "examples": { + "describe.c": ["ex/v1.9.1/describe.html#git_describe_commit-3"] + } }, - "git_diff_blobs": { + "git_describe_workdir": { "type": "function", - "file": "diff.h", - "line": 1121, - "lineto": 1131, + "file": "git2/describe.h", + "line": 177, + "lineto": 180, "args": [ { - "name": "old_blob", - "type": "const git_blob *", - "comment": "Blob for old side of diff, or NULL for empty blob" - }, - { - "name": "old_as_path", - "type": "const char *", - "comment": "Treat old blob as if it had this filename; can be NULL" + "name": "out", + "type": "git_describe_result **", + "comment": "pointer to store the result. You must free this once\n you're done with it." }, { - "name": "new_blob", - "type": "const git_blob *", - "comment": "Blob for new side of diff, or NULL for empty blob" + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to perform the describe" }, { - "name": "new_as_path", - "type": "const char *", - "comment": "Treat new blob as if it had this filename; can be NULL" - }, + "name": "opts", + "type": "git_describe_options *", + "comment": "the lookup options (or NULL for defaults)" + } + ], + "argline": "git_describe_result **out, git_repository *repo, git_describe_options *opts", + "sig": "git_describe_result **::git_repository *::git_describe_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Describe a commit

\n", + "comments": "

Perform the describe operation on the current commit and the worktree. After performing describe on HEAD, a status is run and the description is considered to be dirty if there are.

\n", + "group": "describe", + "examples": { + "describe.c": ["ex/v1.9.1/describe.html#git_describe_workdir-4"] + } + }, + "git_describe_format": { + "type": "function", + "file": "git2/describe.h", + "line": 191, + "lineto": 194, + "args": [ { - "name": "options", - "type": "const git_diff_options *", - "comment": "Options for diff, or NULL for default options" + "name": "out", + "type": "git_buf *", + "comment": "The buffer to store the result" }, { - "name": "file_cb", - "type": "git_diff_file_cb", - "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" + "name": "result", + "type": "const git_describe_result *", + "comment": "the result from `git_describe_commit()` or\n `git_describe_workdir()`." }, { - "name": "binary_cb", - "type": "git_diff_binary_cb", - "comment": "Callback for binary files; can be NULL" - }, + "name": "opts", + "type": "const git_describe_format_options *", + "comment": "the formatting options (or NULL for defaults)" + } + ], + "argline": "git_buf *out, const git_describe_result *result, const git_describe_format_options *opts", + "sig": "git_buf *::const git_describe_result *::const git_describe_format_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Print the describe result to a buffer

\n", + "comments": "", + "group": "describe", + "examples": { + "describe.c": ["ex/v1.9.1/describe.html#git_describe_format-5"] + } + }, + "git_describe_result_free": { + "type": "function", + "file": "git2/describe.h", + "line": 201, + "lineto": 201, + "args": [ { - "name": "hunk_cb", - "type": "git_diff_hunk_cb", - "comment": "Callback for each hunk in diff; can be NULL" - }, + "name": "result", + "type": "git_describe_result *", + "comment": "The result to free." + } + ], + "argline": "git_describe_result *result", + "sig": "git_describe_result *", + "return": { "type": "void", "comment": null }, + "description": "

Free the describe result.

\n", + "comments": "", + "group": "describe" + }, + "git_diff_options_init": { + "type": "function", + "file": "git2/diff.h", + "line": 492, + "lineto": 494, + "args": [ { - "name": "line_cb", - "type": "git_diff_line_cb", - "comment": "Callback for each line in diff; can be NULL" + "name": "opts", + "type": "git_diff_options *", + "comment": "The `git_diff_options` struct to initialize." }, { - "name": "payload", - "type": "void *", - "comment": "Payload passed to each callback function" + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DIFF_OPTIONS_VERSION`." } ], - "argline": "const git_blob *old_blob, const char *old_as_path, const git_blob *new_blob, const char *new_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", - "sig": "const git_blob *::const char *::const git_blob *::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", + "argline": "git_diff_options *opts, unsigned int version", + "sig": "git_diff_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" + "comment": " Zero on success; -1 on failure." }, - "description": "

Directly run a diff on two blobs.

\n", - "comments": "

Compared to a file, a blob lacks some contextual information. As such, the git_diff_file given to the callback will have some fake data; i.e. mode will be 0 and path will be NULL.

\n\n

NULL is allowed for either old_blob or new_blob and will be treated as an empty blob, with the oid set to NULL in the git_diff_file data. Passing NULL for both blobs is a noop; no callbacks will be made at all.

\n\n

We do run a binary content check on the blob content and if either blob looks like binary data, the git_diff_delta binary attribute will be set to 1 and no call to the hunk_cb nor line_cb will be made (unless you pass GIT_DIFF_FORCE_TEXT of course).

\n", + "description": "

Initialize git_diff_options structure

\n", + "comments": "

Initializes a git_diff_options with default values. Equivalent to creating an instance with GIT_DIFF_OPTIONS_INIT.

\n", "group": "diff" }, - "git_diff_blob_to_buffer": { + "git_diff_find_options_init": { "type": "function", - "file": "diff.h", - "line": 1158, - "lineto": 1169, + "file": "git2/diff.h", + "line": 846, + "lineto": 848, "args": [ { - "name": "old_blob", - "type": "const git_blob *", - "comment": "Blob for old side of diff, or NULL for empty blob" - }, - { - "name": "old_as_path", - "type": "const char *", - "comment": "Treat old blob as if it had this filename; can be NULL" - }, - { - "name": "buffer", - "type": "const char *", - "comment": "Raw data for new side of diff, or NULL for empty" - }, - { - "name": "buffer_len", - "type": "size_t", - "comment": "Length of raw data for new side of diff" - }, - { - "name": "buffer_as_path", - "type": "const char *", - "comment": "Treat buffer as if it had this filename; can be NULL" - }, - { - "name": "options", - "type": "const git_diff_options *", - "comment": "Options for diff, or NULL for default options" - }, - { - "name": "file_cb", - "type": "git_diff_file_cb", - "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" - }, - { - "name": "binary_cb", - "type": "git_diff_binary_cb", - "comment": "Callback for binary files; can be NULL" - }, - { - "name": "hunk_cb", - "type": "git_diff_hunk_cb", - "comment": "Callback for each hunk in diff; can be NULL" - }, - { - "name": "line_cb", - "type": "git_diff_line_cb", - "comment": "Callback for each line in diff; can be NULL" + "name": "opts", + "type": "git_diff_find_options *", + "comment": "The `git_diff_find_options` struct to initialize." }, { - "name": "payload", - "type": "void *", - "comment": "Payload passed to each callback function" + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DIFF_FIND_OPTIONS_VERSION`." 
} ], - "argline": "const git_blob *old_blob, const char *old_as_path, const char *buffer, size_t buffer_len, const char *buffer_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", - "sig": "const git_blob *::const char *::const char *::size_t::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", + "argline": "git_diff_find_options *opts, unsigned int version", + "sig": "git_diff_find_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" + "comment": " Zero on success; -1 on failure." }, - "description": "

Directly run a diff between a blob and a buffer.

\n", - "comments": "

As with git_diff_blobs, comparing a blob and buffer lacks some context, so the git_diff_file parameters to the callbacks will be faked a la the rules for git_diff_blobs().

\n\n

Passing NULL for old_blob will be treated as an empty blob (i.e. the file_cb will be invoked with GIT_DELTA_ADDED and the diff will be the entire content of the buffer added). Passing NULL to the buffer will do the reverse, with GIT_DELTA_REMOVED and blob content removed.

\n", + "description": "

Initialize git_diff_find_options structure

\n", + "comments": "

Initializes a git_diff_find_options with default values. Equivalent to creating an instance with GIT_DIFF_FIND_OPTIONS_INIT.

\n", "group": "diff" }, - "git_diff_buffers": { + "git_diff_free": { "type": "function", - "file": "diff.h", - "line": 1192, - "lineto": 1204, + "file": "git2/diff.h", + "line": 862, + "lineto": 862, "args": [ { - "name": "old_buffer", - "type": "const void *", - "comment": "Raw data for old side of diff, or NULL for empty" - }, - { - "name": "old_len", - "type": "size_t", - "comment": "Length of the raw data for old side of the diff" - }, + "name": "diff", + "type": "git_diff *", + "comment": "The previously created diff; cannot be used after free." + } + ], + "argline": "git_diff *diff", + "sig": "git_diff *", + "return": { "type": "void", "comment": null }, + "description": "

Deallocate a diff.

\n", + "comments": "", + "group": "diff", + "examples": { + "diff.c": ["ex/v1.9.1/diff.html#git_diff_free-3"], + "log.c": [ + "ex/v1.9.1/log.html#git_diff_free-25", + "ex/v1.9.1/log.html#git_diff_free-26" + ] + } + }, + "git_diff_tree_to_tree": { + "type": "function", + "file": "git2/diff.h", + "line": 881, + "lineto": 886, + "args": [ { - "name": "old_as_path", - "type": "const char *", - "comment": "Treat old buffer as if it had this filename; can be NULL" + "name": "diff", + "type": "git_diff **", + "comment": "Output pointer to a git_diff pointer to be allocated." }, { - "name": "new_buffer", - "type": "const void *", - "comment": "Raw data for new side of diff, or NULL for empty" + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the trees." }, { - "name": "new_len", - "type": "size_t", - "comment": "Length of raw data for new side of diff" + "name": "old_tree", + "type": "git_tree *", + "comment": "A git_tree object to diff from, or NULL for empty tree." }, { - "name": "new_as_path", - "type": "const char *", - "comment": "Treat buffer as if it had this filename; can be NULL" + "name": "new_tree", + "type": "git_tree *", + "comment": "A git_tree object to diff to, or NULL for empty tree." }, { - "name": "options", + "name": "opts", "type": "const git_diff_options *", - "comment": "Options for diff, or NULL for default options" - }, + "comment": "Structure with options to influence diff or NULL for defaults." + } + ], + "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, git_tree *new_tree, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_tree *::git_tree *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff with the difference between two tree objects.

\n", + "comments": "

This is equivalent to git diff <old-tree> <new-tree>

\n\n

The first tree will be used for the "old_file" side of the delta and the second tree will be used for the "new_file" side of the delta. You can pass NULL to indicate an empty tree, although it is an error to pass NULL for both the old_tree and new_tree.

\n", + "group": "diff", + "examples": { + "diff.c": ["ex/v1.9.1/diff.html#git_diff_tree_to_tree-4"], + "log.c": [ + "ex/v1.9.1/log.html#git_diff_tree_to_tree-27", + "ex/v1.9.1/log.html#git_diff_tree_to_tree-28" + ] + } + }, + "git_diff_tree_to_index": { + "type": "function", + "file": "git2/diff.h", + "line": 908, + "lineto": 913, + "args": [ { - "name": "file_cb", - "type": "git_diff_file_cb", - "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" + "name": "diff", + "type": "git_diff **", + "comment": "Output pointer to a git_diff pointer to be allocated." }, { - "name": "binary_cb", - "type": "git_diff_binary_cb", - "comment": "Callback for binary files; can be NULL" + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the tree and index." }, { - "name": "hunk_cb", - "type": "git_diff_hunk_cb", - "comment": "Callback for each hunk in diff; can be NULL" + "name": "old_tree", + "type": "git_tree *", + "comment": "A git_tree object to diff from, or NULL for empty tree." }, { - "name": "line_cb", - "type": "git_diff_line_cb", - "comment": "Callback for each line in diff; can be NULL" + "name": "index", + "type": "git_index *", + "comment": "The index to diff with; repo index used if NULL." }, { - "name": "payload", - "type": "void *", - "comment": "Payload passed to each callback function" + "name": "opts", + "type": "const git_diff_options *", + "comment": "Structure with options to influence diff or NULL for defaults." 
} ], - "argline": "const void *old_buffer, size_t old_len, const char *old_as_path, const void *new_buffer, size_t new_len, const char *new_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", - "sig": "const void *::size_t::const char *::const void *::size_t::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" - }, - "description": "

Directly run a diff between two buffers.

\n", - "comments": "

Even more than with git_diff_blobs, comparing two buffer lacks context, so the git_diff_file parameters to the callbacks will be faked a la the rules for git_diff_blobs().

\n", - "group": "diff" + "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, git_index *index, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_tree *::git_index *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff between a tree and repository index.

\n", + "comments": "

This is equivalent to git diff --cached <treeish> or if you pass the HEAD tree, then like git diff --cached.

\n\n

The tree you pass will be used for the "old_file" side of the delta, and the index will be used for the "new_file" side of the delta.

\n\n

If you pass NULL for the index, then the existing index of the repo will be used. In this case, the index will be refreshed from disk (if it has changed) before the diff is generated.

\n", + "group": "diff", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_diff_tree_to_index-5"] } }, - "git_diff_from_buffer": { + "git_diff_index_to_workdir": { "type": "function", - "file": "diff.h", - "line": 1225, - "lineto": 1228, + "file": "git2/diff.h", + "line": 936, + "lineto": 940, "args": [ { - "name": "out", + "name": "diff", "type": "git_diff **", - "comment": "A pointer to a git_diff pointer that will be allocated." + "comment": "Output pointer to a git_diff pointer to be allocated." }, { - "name": "content", - "type": "const char *", - "comment": "The contents of a patch file" + "name": "repo", + "type": "git_repository *", + "comment": "The repository." }, { - "name": "content_len", - "type": "size_t", - "comment": "The length of the patch file contents" + "name": "index", + "type": "git_index *", + "comment": "The index to diff from; repo index used if NULL." + }, + { + "name": "opts", + "type": "const git_diff_options *", + "comment": "Structure with options to influence diff or NULL for defaults." } ], - "argline": "git_diff **out, const char *content, size_t content_len", - "sig": "git_diff **::const char *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Read the contents of a git patch file into a git_diff object.

\n", - "comments": "

The diff object produced is similar to the one that would be produced if you actually produced it computationally by comparing two trees, however there may be subtle differences. For example, a patch file likely contains abbreviated object IDs, so the object IDs in a git_diff_delta produced by this function will also be abbreviated.

\n\n

This function will only read patch files created by a git implementation, it will not read unified diffs produced by the diff program, nor any other types of patch files.

\n", - "group": "diff" + "argline": "git_diff **diff, git_repository *repo, git_index *index, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_index *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff between the repository index and the workdir directory.

\n", + "comments": "

This matches the git diff command. See the note below on git_diff_tree_to_workdir for a discussion of the difference between git diff and git diff HEAD and how to emulate a git diff <treeish> using libgit2.

\n\n

The index will be used for the "old_file" side of the delta, and the working directory will be used for the "new_file" side of the delta.

\n\n

If you pass NULL for the index, then the existing index of the repo will be used. In this case, the index will be refreshed from disk (if it has changed) before the diff is generated.

\n", + "group": "diff", + "examples": { + "diff.c": ["ex/v1.9.1/diff.html#git_diff_index_to_workdir-6"] + } }, - "git_diff_get_stats": { + "git_diff_tree_to_workdir": { "type": "function", - "file": "diff.h", - "line": 1264, - "lineto": 1266, + "file": "git2/diff.h", + "line": 966, + "lineto": 970, "args": [ { - "name": "out", - "type": "git_diff_stats **", - "comment": "Structure containg the diff statistics." + "name": "diff", + "type": "git_diff **", + "comment": "A pointer to a git_diff pointer that will be allocated." }, { - "name": "diff", - "type": "git_diff *", - "comment": "A git_diff generated by one of the above functions." + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the tree." + }, + { + "name": "old_tree", + "type": "git_tree *", + "comment": "A git_tree object to diff from, or NULL for empty tree." + }, + { + "name": "opts", + "type": "const git_diff_options *", + "comment": "Structure with options to influence diff or NULL for defaults." } ], - "argline": "git_diff_stats **out, git_diff *diff", - "sig": "git_diff_stats **::git_diff *", - "return": { - "type": "int", - "comment": " 0 on success; non-zero on error" - }, - "description": "

Accumulate diff statistics for all patches.

\n", - "comments": "", + "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_tree *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff between a tree and the working directory.

\n", + "comments": "

The tree you provide will be used for the "old_file" side of the delta, and the working directory will be used for the "new_file" side.

\n\n

This is not the same as git diff <treeish> or git diff-index <treeish>. Those commands use information from the index, whereas this function strictly returns the differences between the tree and the files in the working directory, regardless of the state of the index. Use git_diff_tree_to_workdir_with_index to emulate those commands.

\n\n

To see difference between this and git_diff_tree_to_workdir_with_index, consider the example of a staged file deletion where the file has then been put back into the working dir and further modified. The tree-to-workdir diff for that file is 'modified', but git diff would show status 'deleted' since there is a staged delete.

\n", "group": "diff", "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_get_stats-10" - ] + "diff.c": ["ex/v1.9.1/diff.html#git_diff_tree_to_workdir-7"] } }, - "git_diff_stats_files_changed": { + "git_diff_tree_to_workdir_with_index": { "type": "function", - "file": "diff.h", - "line": 1274, - "lineto": 1275, + "file": "git2/diff.h", + "line": 986, + "lineto": 990, "args": [ { - "name": "stats", - "type": "const git_diff_stats *", - "comment": "A `git_diff_stats` generated by one of the above functions." + "name": "diff", + "type": "git_diff **", + "comment": "A pointer to a git_diff pointer that will be allocated." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the tree." + }, + { + "name": "old_tree", + "type": "git_tree *", + "comment": "A git_tree object to diff from, or NULL for empty tree." + }, + { + "name": "opts", + "type": "const git_diff_options *", + "comment": "Structure with options to influence diff or NULL for defaults." } ], - "argline": "const git_diff_stats *stats", - "sig": "const git_diff_stats *", - "return": { - "type": "size_t", - "comment": " total number of files changed in the diff" - }, - "description": "

Get the total number of files changed in a diff

\n", - "comments": "", - "group": "diff" + "argline": "git_diff **diff, git_repository *repo, git_tree *old_tree, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_tree *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff between a tree and the working directory using index data\n to account for staged deletes, tracked files, etc.

\n", + "comments": "

This emulates git diff <tree> by diffing the tree to the index and the index to the working directory and blending the results into a single diff that includes staged deleted, etc.

\n", + "group": "diff", + "examples": { + "diff.c": ["ex/v1.9.1/diff.html#git_diff_tree_to_workdir_with_index-8"] + } }, - "git_diff_stats_insertions": { + "git_diff_index_to_index": { "type": "function", - "file": "diff.h", - "line": 1283, - "lineto": 1284, + "file": "git2/diff.h", + "line": 1005, + "lineto": 1010, "args": [ { - "name": "stats", - "type": "const git_diff_stats *", - "comment": "A `git_diff_stats` generated by one of the above functions." + "name": "diff", + "type": "git_diff **", + "comment": "Output pointer to a git_diff pointer to be allocated." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository containing the indexes." + }, + { + "name": "old_index", + "type": "git_index *", + "comment": "A git_index object to diff from." + }, + { + "name": "new_index", + "type": "git_index *", + "comment": "A git_index object to diff to." + }, + { + "name": "opts", + "type": "const git_diff_options *", + "comment": "Structure with options to influence diff or NULL for defaults." } ], - "argline": "const git_diff_stats *stats", - "sig": "const git_diff_stats *", - "return": { - "type": "size_t", - "comment": " total number of insertions in the diff" - }, - "description": "

Get the total number of insertions in a diff

\n", - "comments": "", + "argline": "git_diff **diff, git_repository *repo, git_index *old_index, git_index *new_index, const git_diff_options *opts", + "sig": "git_diff **::git_repository *::git_index *::git_index *::const git_diff_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a diff with the difference between two index objects.

\n", + "comments": "

The first index will be used for the "old_file" side of the delta and the second index will be used for the "new_file" side of the delta.

\n", "group": "diff" }, - "git_diff_stats_deletions": { + "git_diff_merge": { "type": "function", - "file": "diff.h", - "line": 1292, - "lineto": 1293, + "file": "git2/diff.h", + "line": 1026, + "lineto": 1028, "args": [ { - "name": "stats", - "type": "const git_diff_stats *", - "comment": "A `git_diff_stats` generated by one of the above functions." + "name": "onto", + "type": "git_diff *", + "comment": "Diff to merge into." + }, + { + "name": "from", + "type": "const git_diff *", + "comment": "Diff to merge." } ], - "argline": "const git_diff_stats *stats", - "sig": "const git_diff_stats *", - "return": { - "type": "size_t", - "comment": " total number of deletions in the diff" - }, - "description": "

Get the total number of deletions in a diff

\n", - "comments": "", + "argline": "git_diff *onto, const git_diff *from", + "sig": "git_diff *::const git_diff *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Merge one diff into another.

\n", + "comments": "

This merges items from the "from" list into the "onto" list. The resulting diff will have all items that appear in either list. If an item appears in both lists, then it will be "merged" to appear as if the old version was from the "onto" list and the new version is from the "from" list (with the exception that if the item has a pending DELETE in the middle, then it will show as deleted).

\n", "group": "diff" }, - "git_diff_stats_to_buf": { + "git_diff_find_similar": { "type": "function", - "file": "diff.h", - "line": 1304, - "lineto": 1308, + "file": "git2/diff.h", + "line": 1042, + "lineto": 1044, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "buffer to store the formatted diff statistics in." - }, - { - "name": "stats", - "type": "const git_diff_stats *", - "comment": "A `git_diff_stats` generated by one of the above functions." - }, - { - "name": "format", - "type": "git_diff_stats_format_t", - "comment": "Formatting option." + "name": "diff", + "type": "git_diff *", + "comment": "diff to run detection algorithms on" }, { - "name": "width", - "type": "size_t", - "comment": "Target width for output (only affects GIT_DIFF_STATS_FULL)" + "name": "options", + "type": "const git_diff_find_options *", + "comment": "Control how detection should be run, NULL for defaults" } ], - "argline": "git_buf *out, const git_diff_stats *stats, git_diff_stats_format_t format, size_t width", - "sig": "git_buf *::const git_diff_stats *::git_diff_stats_format_t::size_t", - "return": { - "type": "int", - "comment": " 0 on success; non-zero on error" - }, - "description": "

Print diff statistics to a git_buf.

\n", - "comments": "", + "argline": "git_diff *diff, const git_diff_find_options *options", + "sig": "git_diff *::const git_diff_find_options *", + "return": { "type": "int", "comment": " 0 on success, -1 on failure" }, + "description": "

Transform a diff marking file renames, copies, etc.

\n", + "comments": "

This modifies a diff in place, replacing old entries that look like renames or copies with new entries reflecting those changes. This also will, if requested, break modified files into add/remove pairs if the amount of change is above a threshold.

\n", "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_stats_to_buf-11" - ] - } + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_diff_find_similar-9"] } }, - "git_diff_stats_free": { + "git_diff_num_deltas": { "type": "function", - "file": "diff.h", - "line": 1316, - "lineto": 1316, + "file": "git2/diff.h", + "line": 1062, + "lineto": 1062, "args": [ { - "name": "stats", - "type": "git_diff_stats *", - "comment": "The previously created statistics object;\n cannot be used after free." + "name": "diff", + "type": "const git_diff *", + "comment": "A git_diff generated by one of the above functions" } ], - "argline": "git_diff_stats *stats", - "sig": "git_diff_stats *", + "argline": "const git_diff *diff", + "sig": "const git_diff *", "return": { - "type": "void", - "comment": null + "type": "size_t", + "comment": " Count of number of deltas in the list" }, - "description": "

Deallocate a git_diff_stats.

\n", + "description": "

Query how many diff records are there in a diff.

\n", "comments": "", "group": "diff", - "examples": { - "diff.c": [ - "ex/HEAD/diff.html#git_diff_stats_free-12" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_diff_num_deltas-29"] } }, - "git_diff_format_email": { + "git_diff_num_deltas_of_type": { "type": "function", - "file": "diff.h", - "line": 1368, - "lineto": 1371, + "file": "git2/diff.h", + "line": 1075, + "lineto": 1076, "args": [ - { - "name": "out", - "type": "git_buf *", - "comment": "buffer to store the e-mail patch in" - }, { "name": "diff", - "type": "git_diff *", - "comment": "containing the commit" + "type": "const git_diff *", + "comment": "A git_diff generated by one of the above functions" }, { - "name": "opts", - "type": "const git_diff_format_email_options *", - "comment": "structure with options to influence content and formatting." + "name": "type", + "type": "git_delta_t", + "comment": "A git_delta_t value to filter the count" } ], - "argline": "git_buf *out, git_diff *diff, const git_diff_format_email_options *opts", - "sig": "git_buf *::git_diff *::const git_diff_format_email_options *", + "argline": "const git_diff *diff, git_delta_t type", + "sig": "const git_diff *::git_delta_t", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "size_t", + "comment": " Count of number of deltas matching delta_t type" }, - "description": "

Create an e-mail ready patch from a diff.

\n", - "comments": "", + "description": "

Query how many diff deltas are there in a diff filtered by type.

\n", + "comments": "

This works just like git_diff_num_deltas() with an extra parameter that is a git_delta_t and returns just the count of how many deltas match that particular type.

\n", "group": "diff" }, - "git_diff_commit_as_email": { + "git_diff_get_delta": { "type": "function", - "file": "diff.h", - "line": 1387, - "lineto": 1394, + "file": "git2/diff.h", + "line": 1095, + "lineto": 1096, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "buffer to store the e-mail patch in" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "containing the commit" - }, - { - "name": "commit", - "type": "git_commit *", - "comment": "pointer to up commit" - }, - { - "name": "patch_no", - "type": "size_t", - "comment": "patch number of the commit" - }, - { - "name": "total_patches", - "type": "size_t", - "comment": "total number of patches in the patch set" - }, - { - "name": "flags", - "type": "git_diff_format_email_flags_t", - "comment": "determines the formatting of the e-mail" + "name": "diff", + "type": "const git_diff *", + "comment": "Diff list object" }, - { - "name": "diff_opts", - "type": "const git_diff_options *", - "comment": "structure with options to influence diff or NULL for defaults." - } + { "name": "idx", "type": "size_t", "comment": "Index into diff list" } ], - "argline": "git_buf *out, git_repository *repo, git_commit *commit, size_t patch_no, size_t total_patches, git_diff_format_email_flags_t flags, const git_diff_options *diff_opts", - "sig": "git_buf *::git_repository *::git_commit *::size_t::size_t::git_diff_format_email_flags_t::const git_diff_options *", + "argline": "const git_diff *diff, size_t idx", + "sig": "const git_diff *::size_t", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const git_diff_delta *", + "comment": " Pointer to git_diff_delta (or NULL if `idx` out of range)" }, - "description": "

Create an e-mail ready patch for a commit.

\n", - "comments": "

Does not support creating patches for merge commits (yet).

\n", + "description": "

Return the diff delta for an entry in the diff list.

\n", + "comments": "

The git_diff_delta pointer points to internal data and you do not have to release it when you are done with it. It will go away when the * git_diff (or any associated git_patch) goes away.

\n\n

Note that the flags on the delta related to whether it has binary content or not may not be set if there are no attributes set for the file and there has been no reason to load the file data at this point. For now, if you need those flags to be up to date, your only option is to either use git_diff_foreach or create a git_patch.

\n", "group": "diff" }, - "git_diff_format_email_init_options": { + "git_diff_is_sorted_icase": { "type": "function", - "file": "diff.h", - "line": 1405, - "lineto": 1407, + "file": "git2/diff.h", + "line": 1104, + "lineto": 1104, "args": [ { - "name": "opts", - "type": "git_diff_format_email_options *", - "comment": "The `git_diff_format_email_options` struct to initialize" - }, - { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_DIFF_FORMAT_EMAIL_OPTIONS_VERSION`" + "name": "diff", + "type": "const git_diff *", + "comment": "diff to check" } ], - "argline": "git_diff_format_email_options *opts, unsigned int version", - "sig": "git_diff_format_email_options *::unsigned int", + "argline": "const git_diff *diff", + "sig": "const git_diff *", "return": { "type": "int", - "comment": " Zero on success; -1 on failure." + "comment": " 0 if case sensitive, 1 if case is ignored" }, - "description": "

Initializes a git_diff_format_email_options with default values.

\n", - "comments": "

Equivalent to creating an instance with GIT_DIFF_FORMAT_EMAIL_OPTIONS_INIT.

\n", + "description": "

Check if deltas are sorted case sensitively or insensitively.

\n", + "comments": "", "group": "diff" }, - "git_diff_patchid_init_options": { + "git_diff_foreach": { "type": "function", - "file": "diff.h", - "line": 1428, - "lineto": 1430, + "file": "git2/diff.h", + "line": 1132, + "lineto": 1138, "args": [ { - "name": "opts", - "type": "git_diff_patchid_options *", - "comment": null + "name": "diff", + "type": "git_diff *", + "comment": "A git_diff generated by one of the above functions." }, { - "name": "version", - "type": "unsigned int", - "comment": null - } - ], - "argline": "git_diff_patchid_options *opts, unsigned int version", - "sig": "git_diff_patchid_options *::unsigned int", + "name": "file_cb", + "type": "git_diff_file_cb", + "comment": "Callback function to make per file in the diff." + }, + { + "name": "binary_cb", + "type": "git_diff_binary_cb", + "comment": "Optional callback to make for binary files." + }, + { + "name": "hunk_cb", + "type": "git_diff_hunk_cb", + "comment": "Optional callback to make per hunk of text diff. This\n callback is called to describe a range of lines in the\n diff. It will not be issued for binary files." + }, + { + "name": "line_cb", + "type": "git_diff_line_cb", + "comment": "Optional callback to make per line of diff text. This\n same callback will be made for context lines, added, and\n removed lines, and even for a deleted trailing newline." + }, + { + "name": "payload", + "type": "void *", + "comment": "Reference pointer that will be passed to your callbacks." + } + ], + "argline": "git_diff *diff, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", + "sig": "git_diff *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Initialize git_diff_patchid_options structure.

\n", - "comments": "

Initializes the structure with default values. Equivalent to creating an instance with GIT_DIFF_PATCHID_OPTIONS_INIT.

\n", + "description": "

Loop over all deltas in a diff issuing callbacks.

\n", + "comments": "

This will iterate through all of the files described in a diff. You should provide a file callback to learn about each file.

\n\n

The "hunk" and "line" callbacks are optional, and the text diff of the files will only be calculated if they are not NULL. Of course, these callbacks will not be invoked for binary files on the diff or for files whose only changed is a file mode change.

\n\n

Returning a non-zero value from any of the callbacks will terminate the iteration and return the value to the user.

\n", "group": "diff" }, - "git_diff_patchid": { + "git_diff_status_char": { "type": "function", - "file": "diff.h", - "line": 1452, - "lineto": 1452, + "file": "git2/diff.h", + "line": 1151, + "lineto": 1151, "args": [ { - "name": "out", - "type": "git_oid *", - "comment": "Pointer where the calculated patch ID shoul be\n stored" - }, + "name": "status", + "type": "git_delta_t", + "comment": "The git_delta_t value to look up" + } + ], + "argline": "git_delta_t status", + "sig": "git_delta_t", + "return": { + "type": "char", + "comment": " The single character label for that code" + }, + "description": "

Look up the single character abbreviation for a delta status code.

\n", + "comments": "

When you run git diff --name-status it uses single letter codes in the output such as 'A' for added, 'D' for deleted, 'M' for modified, etc. This function converts a git_delta_t value into these letters for your own purposes. GIT_DELTA_UNTRACKED will return a space (i.e. ' ').

\n", + "group": "diff" + }, + "git_diff_print": { + "type": "function", + "file": "git2/diff.h", + "line": 1177, + "lineto": 1181, + "args": [ { "name": "diff", "type": "git_diff *", - "comment": "The diff to calculate the ID for" + "comment": "A git_diff generated by one of the above functions." }, { - "name": "opts", - "type": "git_diff_patchid_options *", - "comment": "Options for how to calculate the patch ID. This is\n intended for future changes, as currently no options are\n available." + "name": "format", + "type": "git_diff_format_t", + "comment": "A git_diff_format_t value to pick the text format." + }, + { + "name": "print_cb", + "type": "git_diff_line_cb", + "comment": "Callback to make per line of diff text." + }, + { + "name": "payload", + "type": "void *", + "comment": "Reference pointer that will be passed to your callback." } ], - "argline": "git_oid *out, git_diff *diff, git_diff_patchid_options *opts", - "sig": "git_oid *::git_diff *::git_diff_patchid_options *", + "argline": "git_diff *diff, git_diff_format_t format, git_diff_line_cb print_cb, void *payload", + "sig": "git_diff *::git_diff_format_t::git_diff_line_cb::void *", "return": { "type": "int", - "comment": " 0 on success, an error code otherwise." - }, - "description": "

Calculate the patch ID for the given patch.

\n", - "comments": "

Calculate a stable patch ID for the given patch by summing the hash of the file diffs, ignoring whitespace and line numbers. This can be used to derive whether two diffs are the same with a high probability.

\n\n

Currently, this function only calculates stable patch IDs, as defined in git-patch-id(1), and should in fact generate the same IDs as the upstream git project does.

\n", - "group": "diff" - }, - "giterr_last": { - "type": "function", - "file": "errors.h", - "line": 115, - "lineto": 115, - "args": [], - "argline": "", - "sig": "", - "return": { - "type": "const git_error *", - "comment": " A git_error object." + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Return the last git_error object that was generated for the\n current thread or NULL if no error has occurred.

\n", - "comments": "", - "group": "giterr", + "description": "

Iterate over a diff generating formatted text output.

\n", + "comments": "

Returning a non-zero value from the callbacks will terminate the iteration and return the non-zero value to the caller.

\n", + "group": "diff", "examples": { - "general.c": [ - "ex/HEAD/general.html#giterr_last-33" - ], - "merge.c": [ - "ex/HEAD/merge.html#giterr_last-10", - "ex/HEAD/merge.html#giterr_last-11" - ], - "network/clone.c": [ - "ex/HEAD/network/clone.html#giterr_last-2" - ] + "diff.c": ["ex/v1.9.1/diff.html#git_diff_print-10"], + "log.c": ["ex/v1.9.1/log.html#git_diff_print-30"] } }, - "giterr_clear": { - "type": "function", - "file": "errors.h", - "line": 120, - "lineto": 120, - "args": [], - "argline": "", - "sig": "", - "return": { - "type": "void", - "comment": null - }, - "description": "

Clear the last library error that occurred for this thread.

\n", - "comments": "", - "group": "giterr" - }, - "giterr_set_str": { + "git_diff_to_buf": { "type": "function", - "file": "errors.h", - "line": 138, - "lineto": 138, + "file": "git2/diff.h", + "line": 1193, + "lineto": 1196, "args": [ { - "name": "error_class", - "type": "int", - "comment": "One of the `git_error_t` enum above describing the\n general subsystem that is responsible for the error." + "name": "out", + "type": "git_buf *", + "comment": "A pointer to a user-allocated git_buf that will\n contain the diff text" }, { - "name": "string", - "type": "const char *", - "comment": "The formatted error message to keep" + "name": "diff", + "type": "git_diff *", + "comment": "A git_diff generated by one of the above functions." + }, + { + "name": "format", + "type": "git_diff_format_t", + "comment": "A git_diff_format_t value to pick the text format." } ], - "argline": "int error_class, const char *string", - "sig": "int::const char *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Set the error message string for this thread.

\n", - "comments": "

This function is public so that custom ODB backends and the like can relay an error message through libgit2. Most regular users of libgit2 will never need to call this function -- actually, calling it in most circumstances (for example, calling from within a callback function) will just end up having the value overwritten by libgit2 internals.

\n\n

This error message is stored in thread-local storage and only applies to the particular thread that this libgit2 call is made from.

\n", - "group": "giterr" - }, - "giterr_set_oom": { - "type": "function", - "file": "errors.h", - "line": 149, - "lineto": 149, - "args": [], - "argline": "", - "sig": "", - "return": { - "type": "void", - "comment": null - }, - "description": "

Set the error message to a special value for memory allocation failure.

\n", - "comments": "

The normal giterr_set_str() function attempts to strdup() the string that is passed in. This is not a good idea when the error in question is a memory allocation failure. That circumstance has a special setter function that sets the error string to a known and statically allocated internal value.

\n", - "group": "giterr" + "argline": "git_buf *out, git_diff *diff, git_diff_format_t format", + "sig": "git_buf *::git_diff *::git_diff_format_t", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Produce the complete formatted text output from a diff into a\n buffer.

\n", + "comments": "", + "group": "diff" }, - "git_filter_list_load": { + "git_diff_blobs": { "type": "function", - "file": "filter.h", - "line": 90, - "lineto": 96, + "file": "git2/diff.h", + "line": 1232, + "lineto": 1242, "args": [ { - "name": "filters", - "type": "git_filter_list **", - "comment": "Output newly created git_filter_list (or NULL)" + "name": "old_blob", + "type": "const git_blob *", + "comment": "Blob for old side of diff, or NULL for empty blob" }, { - "name": "repo", - "type": "git_repository *", - "comment": "Repository object that contains `path`" + "name": "old_as_path", + "type": "const char *", + "comment": "Treat old blob as if it had this filename; can be NULL" }, { - "name": "blob", - "type": "git_blob *", - "comment": "The blob to which the filter will be applied (if known)" + "name": "new_blob", + "type": "const git_blob *", + "comment": "Blob for new side of diff, or NULL for empty blob" }, { - "name": "path", + "name": "new_as_path", "type": "const char *", - "comment": "Relative path of the file to be filtered" + "comment": "Treat new blob as if it had this filename; can be NULL" }, { - "name": "mode", - "type": "git_filter_mode_t", - "comment": "Filtering direction (WT->ODB or ODB->WT)" + "name": "options", + "type": "const git_diff_options *", + "comment": "Options for diff, or NULL for default options" }, { - "name": "flags", - "type": "uint32_t", - "comment": "Combination of `git_filter_flag_t` flags" - } - ], - "argline": "git_filter_list **filters, git_repository *repo, git_blob *blob, const char *path, git_filter_mode_t mode, uint32_t flags", - "sig": "git_filter_list **::git_repository *::git_blob *::const char *::git_filter_mode_t::uint32_t", - "return": { - "type": "int", - "comment": " 0 on success (which could still return NULL if no filters are\n needed for the requested file), \n<\n0 on error" - }, - "description": "

Load the filter list for a given path.

\n", - "comments": "

This will return 0 (success) but set the output git_filter_list to NULL if no filters are requested for the given file.

\n", - "group": "filter" - }, - "git_filter_list_contains": { - "type": "function", - "file": "filter.h", - "line": 110, - "lineto": 112, - "args": [ + "name": "file_cb", + "type": "git_diff_file_cb", + "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" + }, { - "name": "filters", - "type": "git_filter_list *", - "comment": "A loaded git_filter_list (or NULL)" + "name": "binary_cb", + "type": "git_diff_binary_cb", + "comment": "Callback for binary files; can be NULL" }, { - "name": "name", - "type": "const char *", - "comment": "The name of the filter to query" + "name": "hunk_cb", + "type": "git_diff_hunk_cb", + "comment": "Callback for each hunk in diff; can be NULL" + }, + { + "name": "line_cb", + "type": "git_diff_line_cb", + "comment": "Callback for each line in diff; can be NULL" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload passed to each callback function" } ], - "argline": "git_filter_list *filters, const char *name", - "sig": "git_filter_list *::const char *", + "argline": "const git_blob *old_blob, const char *old_as_path, const git_blob *new_blob, const char *new_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", + "sig": "const git_blob *::const char *::const git_blob *::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", "return": { "type": "int", - "comment": " 1 if the filter is in the list, 0 otherwise" + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Query the filter list to see if a given filter (by name) will run.\n The built-in filters "crlf" and "ident" can be queried, otherwise this\n is the name of the filter specified by the filter attribute.

\n", - "comments": "

This will return 0 if the given filter is not in the list, or 1 if the filter will be applied.

\n", - "group": "filter" + "description": "

Directly run a diff on two blobs.

\n", + "comments": "

Compared to a file, a blob lacks some contextual information. As such, the git_diff_file given to the callback will have some fake data; i.e. mode will be 0 and path will be NULL.

\n\n

NULL is allowed for either old_blob or new_blob and will be treated as an empty blob, with the oid set to NULL in the git_diff_file data. Passing NULL for both blobs is a noop; no callbacks will be made at all.

\n\n

We do run a binary content check on the blob content and if either blob looks like binary data, the git_diff_delta binary attribute will be set to 1 and no call to the hunk_cb nor line_cb will be made (unless you pass GIT_DIFF_FORCE_TEXT of course).

\n", + "group": "diff" }, - "git_filter_list_apply_to_data": { + "git_diff_blob_to_buffer": { "type": "function", - "file": "filter.h", - "line": 134, - "lineto": 137, + "file": "git2/diff.h", + "line": 1269, + "lineto": 1280, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "Buffer to store the result of the filtering" + "name": "old_blob", + "type": "const git_blob *", + "comment": "Blob for old side of diff, or NULL for empty blob" }, { - "name": "filters", - "type": "git_filter_list *", - "comment": "A loaded git_filter_list (or NULL)" + "name": "old_as_path", + "type": "const char *", + "comment": "Treat old blob as if it had this filename; can be NULL" }, { - "name": "in", - "type": "git_buf *", - "comment": "Buffer containing the data to filter" + "name": "buffer", + "type": "const char *", + "comment": "Raw data for new side of diff, or NULL for empty" + }, + { + "name": "buffer_len", + "type": "size_t", + "comment": "Length of raw data for new side of diff" + }, + { + "name": "buffer_as_path", + "type": "const char *", + "comment": "Treat buffer as if it had this filename; can be NULL" + }, + { + "name": "options", + "type": "const git_diff_options *", + "comment": "Options for diff, or NULL for default options" + }, + { + "name": "file_cb", + "type": "git_diff_file_cb", + "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" + }, + { + "name": "binary_cb", + "type": "git_diff_binary_cb", + "comment": "Callback for binary files; can be NULL" + }, + { + "name": "hunk_cb", + "type": "git_diff_hunk_cb", + "comment": "Callback for each hunk in diff; can be NULL" + }, + { + "name": "line_cb", + "type": "git_diff_line_cb", + "comment": "Callback for each line in diff; can be NULL" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload passed to each callback function" } ], - "argline": "git_buf *out, git_filter_list *filters, git_buf *in", - "sig": "git_buf *::git_filter_list *::git_buf *", + "argline": 
"const git_blob *old_blob, const char *old_as_path, const char *buffer, size_t buffer_len, const char *buffer_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", + "sig": "const git_blob *::const char *::const char *::size_t::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", "return": { "type": "int", - "comment": " 0 on success, an error code otherwise" + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Apply filter list to a data buffer.

\n", - "comments": "

See git2/buffer.h for background on git_buf objects.

\n\n

If the in buffer holds data allocated by libgit2 (i.e. in->asize is not zero), then it will be overwritten when applying the filters. If not, then it will be left untouched.

\n\n

If there are no filters to apply (or filters is NULL), then the out buffer will reference the in buffer data (with asize set to zero) instead of allocating data. This keeps allocations to a minimum, but it means you have to be careful about freeing the in data since out may be pointing to it!

\n", - "group": "filter" + "description": "

Directly run a diff between a blob and a buffer.

\n", + "comments": "

As with git_diff_blobs, comparing a blob and buffer lacks some context, so the git_diff_file parameters to the callbacks will be faked a la the rules for git_diff_blobs().

\n\n

Passing NULL for old_blob will be treated as an empty blob (i.e. the file_cb will be invoked with GIT_DELTA_ADDED and the diff will be the entire content of the buffer added). Passing NULL to the buffer will do the reverse, with GIT_DELTA_REMOVED and blob content removed.

\n", + "group": "diff" }, - "git_filter_list_apply_to_file": { + "git_diff_buffers": { "type": "function", - "file": "filter.h", - "line": 148, - "lineto": 152, + "file": "git2/diff.h", + "line": 1303, + "lineto": 1315, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "buffer into which to store the filtered file" + "name": "old_buffer", + "type": "const void *", + "comment": "Raw data for old side of diff, or NULL for empty" }, { - "name": "filters", - "type": "git_filter_list *", - "comment": "the list of filters to apply" + "name": "old_len", + "type": "size_t", + "comment": "Length of the raw data for old side of the diff" }, { - "name": "repo", - "type": "git_repository *", - "comment": "the repository in which to perform the filtering" + "name": "old_as_path", + "type": "const char *", + "comment": "Treat old buffer as if it had this filename; can be NULL" }, { - "name": "path", + "name": "new_buffer", + "type": "const void *", + "comment": "Raw data for new side of diff, or NULL for empty" + }, + { + "name": "new_len", + "type": "size_t", + "comment": "Length of raw data for new side of diff" + }, + { + "name": "new_as_path", "type": "const char *", - "comment": "the path of the file to filter, a relative path will be\n taken as relative to the workdir" - } - ], - "argline": "git_buf *out, git_filter_list *filters, git_repository *repo, const char *path", - "sig": "git_buf *::git_filter_list *::git_repository *::const char *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Apply a filter list to the contents of a file on disk

\n", - "comments": "", - "group": "filter" - }, - "git_filter_list_apply_to_blob": { - "type": "function", - "file": "filter.h", - "line": 161, - "lineto": 164, - "args": [ + "comment": "Treat buffer as if it had this filename; can be NULL" + }, { - "name": "out", - "type": "git_buf *", - "comment": "buffer into which to store the filtered file" + "name": "options", + "type": "const git_diff_options *", + "comment": "Options for diff, or NULL for default options" }, { - "name": "filters", - "type": "git_filter_list *", - "comment": "the list of filters to apply" + "name": "file_cb", + "type": "git_diff_file_cb", + "comment": "Callback for \"file\"; made once if there is a diff; can be NULL" }, { - "name": "blob", - "type": "git_blob *", - "comment": "the blob to filter" - } - ], - "argline": "git_buf *out, git_filter_list *filters, git_blob *blob", - "sig": "git_buf *::git_filter_list *::git_blob *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Apply a filter list to the contents of a blob

\n", - "comments": "", - "group": "filter" - }, - "git_filter_list_stream_data": { - "type": "function", - "file": "filter.h", - "line": 173, - "lineto": 176, - "args": [ + "name": "binary_cb", + "type": "git_diff_binary_cb", + "comment": "Callback for binary files; can be NULL" + }, { - "name": "filters", - "type": "git_filter_list *", - "comment": "the list of filters to apply" + "name": "hunk_cb", + "type": "git_diff_hunk_cb", + "comment": "Callback for each hunk in diff; can be NULL" }, { - "name": "data", - "type": "git_buf *", - "comment": "the buffer to filter" + "name": "line_cb", + "type": "git_diff_line_cb", + "comment": "Callback for each line in diff; can be NULL" }, { - "name": "target", - "type": "git_writestream *", - "comment": "the stream into which the data will be written" + "name": "payload", + "type": "void *", + "comment": "Payload passed to each callback function" } ], - "argline": "git_filter_list *filters, git_buf *data, git_writestream *target", - "sig": "git_filter_list *::git_buf *::git_writestream *", + "argline": "const void *old_buffer, size_t old_len, const char *old_as_path, const void *new_buffer, size_t new_len, const char *new_as_path, const git_diff_options *options, git_diff_file_cb file_cb, git_diff_binary_cb binary_cb, git_diff_hunk_cb hunk_cb, git_diff_line_cb line_cb, void *payload", + "sig": "const void *::size_t::const char *::const void *::size_t::const char *::const git_diff_options *::git_diff_file_cb::git_diff_binary_cb::git_diff_hunk_cb::git_diff_line_cb::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Apply a filter list to an arbitrary buffer as a stream

\n", - "comments": "", - "group": "filter" + "description": "

Directly run a diff between two buffers.

\n", + "comments": "

Even more than with git_diff_blobs, comparing two buffer lacks context, so the git_diff_file parameters to the callbacks will be faked a la the rules for git_diff_blobs().

\n", + "group": "diff" }, - "git_filter_list_stream_file": { + "git_diff_from_buffer": { "type": "function", - "file": "filter.h", - "line": 187, - "lineto": 191, + "file": "git2/diff.h", + "line": 1355, + "lineto": 1362, "args": [ { - "name": "filters", - "type": "git_filter_list *", - "comment": "the list of filters to apply" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository in which to perform the filtering" + "name": "out", + "type": "git_diff **", + "comment": "A pointer to a git_diff pointer that will be allocated." }, { - "name": "path", + "name": "content", "type": "const char *", - "comment": "the path of the file to filter, a relative path will be\n taken as relative to the workdir" + "comment": "The contents of a patch file" }, { - "name": "target", - "type": "git_writestream *", - "comment": "the stream into which the data will be written" + "name": "content_len", + "type": "size_t", + "comment": "The length of the patch file contents" } ], - "argline": "git_filter_list *filters, git_repository *repo, const char *path, git_writestream *target", - "sig": "git_filter_list *::git_repository *::const char *::git_writestream *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Apply a filter list to a file as a stream

\n", - "comments": "", - "group": "filter" + "argline": "git_diff **out, const char *content, size_t content_len", + "sig": "git_diff **::const char *::size_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Read the contents of a git patch file into a git_diff object.

\n", + "comments": "

The diff object produced is similar to the one that would be produced if you actually produced it computationally by comparing two trees, however there may be subtle differences. For example, a patch file likely contains abbreviated object IDs, so the object IDs in a git_diff_delta produced by this function will also be abbreviated.

\n\n

This function will only read patch files created by a git implementation, it will not read unified diffs produced by the diff program, nor any other types of patch files.

\n", + "group": "diff", + "examples": { + "diff.c": [ + "ex/v1.9.1/diff.html#git_diff_from_buffer-11", + "ex/v1.9.1/diff.html#git_diff_from_buffer-12" + ] + } }, - "git_filter_list_stream_blob": { + "git_diff_get_stats": { "type": "function", - "file": "filter.h", - "line": 200, - "lineto": 203, + "file": "git2/diff.h", + "line": 1398, + "lineto": 1400, "args": [ { - "name": "filters", - "type": "git_filter_list *", - "comment": "the list of filters to apply" - }, - { - "name": "blob", - "type": "git_blob *", - "comment": "the blob to filter" + "name": "out", + "type": "git_diff_stats **", + "comment": "Structure containing the diff statistics." }, { - "name": "target", - "type": "git_writestream *", - "comment": "the stream into which the data will be written" + "name": "diff", + "type": "git_diff *", + "comment": "A git_diff generated by one of the above functions." } ], - "argline": "git_filter_list *filters, git_blob *blob, git_writestream *target", - "sig": "git_filter_list *::git_blob *::git_writestream *", + "argline": "git_diff_stats **out, git_diff *diff", + "sig": "git_diff_stats **::git_diff *", "return": { "type": "int", - "comment": null + "comment": " 0 on success; non-zero on error" }, - "description": "

Apply a filter list to a blob as a stream

\n", + "description": "

Accumulate diff statistics for all patches.

\n", "comments": "", - "group": "filter" + "group": "diff", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_diff_get_stats-13"] } }, - "git_filter_list_free": { + "git_diff_stats_files_changed": { "type": "function", - "file": "filter.h", - "line": 210, - "lineto": 210, + "file": "git2/diff.h", + "line": 1408, + "lineto": 1409, "args": [ { - "name": "filters", - "type": "git_filter_list *", - "comment": "A git_filter_list created by `git_filter_list_load`" + "name": "stats", + "type": "const git_diff_stats *", + "comment": "A `git_diff_stats` generated by one of the above functions." } ], - "argline": "git_filter_list *filters", - "sig": "git_filter_list *", - "return": { - "type": "void", - "comment": null + "argline": "const git_diff_stats *stats", + "sig": "const git_diff_stats *", + "return": { + "type": "size_t", + "comment": " total number of files changed in the diff" }, - "description": "

Free a git_filter_list

\n", + "description": "

Get the total number of files changed in a diff

\n", "comments": "", - "group": "filter" + "group": "diff" }, - "git_libgit2_init": { + "git_diff_stats_insertions": { "type": "function", - "file": "global.h", - "line": 26, - "lineto": 26, - "args": [], - "argline": "", - "sig": "", + "file": "git2/diff.h", + "line": 1417, + "lineto": 1418, + "args": [ + { + "name": "stats", + "type": "const git_diff_stats *", + "comment": "A `git_diff_stats` generated by one of the above functions." + } + ], + "argline": "const git_diff_stats *stats", + "sig": "const git_diff_stats *", "return": { - "type": "int", - "comment": " the number of initializations of the library, or an error code." + "type": "size_t", + "comment": " total number of insertions in the diff" }, - "description": "

Init the global state

\n", - "comments": "

This function must be called before any other libgit2 function in order to set up global state and threading.

\n\n

This function may be called multiple times - it will return the number of times the initialization has been called (including this one) that have not subsequently been shutdown.

\n", - "group": "libgit2", - "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_libgit2_init-8" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_libgit2_init-10" - ], - "describe.c": [ - "ex/HEAD/describe.html#git_libgit2_init-4" - ], - "diff.c": [ - "ex/HEAD/diff.html#git_libgit2_init-13" - ], - "general.c": [ - "ex/HEAD/general.html#git_libgit2_init-34" - ], - "init.c": [ - "ex/HEAD/init.html#git_libgit2_init-2" - ], - "log.c": [ - "ex/HEAD/log.html#git_libgit2_init-31" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_libgit2_init-12" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_libgit2_init-2" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_libgit2_init-1" - ], - "status.c": [ - "ex/HEAD/status.html#git_libgit2_init-1" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_libgit2_init-3" - ] - } + "description": "

Get the total number of insertions in a diff

\n", + "comments": "", + "group": "diff" }, - "git_libgit2_shutdown": { + "git_diff_stats_deletions": { "type": "function", - "file": "global.h", - "line": 39, - "lineto": 39, - "args": [], - "argline": "", - "sig": "", + "file": "git2/diff.h", + "line": 1426, + "lineto": 1427, + "args": [ + { + "name": "stats", + "type": "const git_diff_stats *", + "comment": "A `git_diff_stats` generated by one of the above functions." + } + ], + "argline": "const git_diff_stats *stats", + "sig": "const git_diff_stats *", "return": { - "type": "int", - "comment": " the number of remaining initializations of the library, or an\n error code." + "type": "size_t", + "comment": " total number of deletions in the diff" }, - "description": "

Shutdown the global state

\n", - "comments": "

Clean up the global state and threading context after calling it as many times as git_libgit2_init() was called - it will return the number of remainining initializations that have not been shutdown (after this one).

\n", - "group": "libgit2", - "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_libgit2_shutdown-9" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_libgit2_shutdown-11" - ], - "describe.c": [ - "ex/HEAD/describe.html#git_libgit2_shutdown-5" - ], - "diff.c": [ - "ex/HEAD/diff.html#git_libgit2_shutdown-14" - ], - "init.c": [ - "ex/HEAD/init.html#git_libgit2_shutdown-3" - ], - "log.c": [ - "ex/HEAD/log.html#git_libgit2_shutdown-32" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_libgit2_shutdown-13" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_libgit2_shutdown-3" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_libgit2_shutdown-2" - ], - "status.c": [ - "ex/HEAD/status.html#git_libgit2_shutdown-2" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_libgit2_shutdown-4" - ] - } + "description": "

Get the total number of deletions in a diff

\n", + "comments": "", + "group": "diff" }, - "git_graph_ahead_behind": { + "git_diff_stats_to_buf": { "type": "function", - "file": "graph.h", - "line": 37, - "lineto": 37, + "file": "git2/diff.h", + "line": 1438, + "lineto": 1442, "args": [ { - "name": "ahead", - "type": "size_t *", - "comment": "number of unique from commits in `upstream`" - }, - { - "name": "behind", - "type": "size_t *", - "comment": "number of unique from commits in `local`" + "name": "out", + "type": "git_buf *", + "comment": "buffer to store the formatted diff statistics in." }, { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" + "name": "stats", + "type": "const git_diff_stats *", + "comment": "A `git_diff_stats` generated by one of the above functions." }, { - "name": "local", - "type": "const git_oid *", - "comment": "the commit for local" + "name": "format", + "type": "git_diff_stats_format_t", + "comment": "Formatting option." }, { - "name": "upstream", - "type": "const git_oid *", - "comment": "the commit for upstream" + "name": "width", + "type": "size_t", + "comment": "Target width for output (only affects GIT_DIFF_STATS_FULL)" } ], - "argline": "size_t *ahead, size_t *behind, git_repository *repo, const git_oid *local, const git_oid *upstream", - "sig": "size_t *::size_t *::git_repository *::const git_oid *::const git_oid *", + "argline": "git_buf *out, const git_diff_stats *stats, git_diff_stats_format_t format, size_t width", + "sig": "git_buf *::const git_diff_stats *::git_diff_stats_format_t::size_t", "return": { "type": "int", - "comment": null + "comment": " 0 on success; non-zero on error" }, - "description": "

Count the number of unique commits between two commit objects

\n", - "comments": "

There is no need for branches containing the commits to have any upstream relationship, but it helps to think of one as a branch and the other as its upstream, the ahead and behind values will be what git would report for the branches.

\n", - "group": "graph" + "description": "

Print diff statistics to a git_buf.

\n", + "comments": "", + "group": "diff", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_diff_stats_to_buf-14"] } }, - "git_graph_descendant_of": { + "git_diff_stats_free": { "type": "function", - "file": "graph.h", - "line": 51, - "lineto": 54, + "file": "git2/diff.h", + "line": 1450, + "lineto": 1450, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": null - }, - { - "name": "commit", - "type": "const git_oid *", - "comment": "a previously loaded commit." - }, - { - "name": "ancestor", - "type": "const git_oid *", - "comment": "a potential ancestor commit." + "name": "stats", + "type": "git_diff_stats *", + "comment": "The previously created statistics object;\n cannot be used after free." } ], - "argline": "git_repository *repo, const git_oid *commit, const git_oid *ancestor", - "sig": "git_repository *::const git_oid *::const git_oid *", - "return": { - "type": "int", - "comment": " 1 if the given commit is a descendant of the potential ancestor,\n 0 if not, error code otherwise." - }, - "description": "

Determine if a commit is the descendant of another commit.

\n", - "comments": "

Note that a commit is not considered a descendant of itself, in contrast to git merge-base --is-ancestor.

\n", - "group": "graph" + "argline": "git_diff_stats *stats", + "sig": "git_diff_stats *", + "return": { "type": "void", "comment": null }, + "description": "

Deallocate a git_diff_stats.

\n", + "comments": "", + "group": "diff", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_diff_stats_free-15"] } }, - "git_ignore_add_rule": { + "git_diff_patchid_options_init": { "type": "function", - "file": "ignore.h", - "line": 37, - "lineto": 39, + "file": "git2/diff.h", + "line": 1479, + "lineto": 1481, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "The repository to add ignore rules to." + "name": "opts", + "type": "git_diff_patchid_options *", + "comment": "The `git_diff_patchid_options` struct to initialize." }, { - "name": "rules", - "type": "const char *", - "comment": "Text of rules, a la the contents of a .gitignore file.\n It is okay to have multiple rules in the text; if so,\n each rule should be terminated with a newline." + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_DIFF_PATCHID_OPTIONS_VERSION`." } ], - "argline": "git_repository *repo, const char *rules", - "sig": "git_repository *::const char *", + "argline": "git_diff_patchid_options *opts, unsigned int version", + "sig": "git_diff_patchid_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success" + "comment": " Zero on success; -1 on failure." }, - "description": "

Add ignore rules for a repository.

\n", - "comments": "

Excludesfile rules (i.e. .gitignore rules) are generally read from .gitignore files in the repository tree or from a shared system file only if a "core.excludesfile" config value is set. The library also keeps a set of per-repository internal ignores that can be configured in-memory and will not persist. This function allows you to add to that internal rules list.

\n\n

Example usage:

\n\n
 error = git_ignore_add_rule(myrepo, "*.c/ with space");\n
\n\n

This would add three rules to the ignores.

\n", - "group": "ignore" + "description": "

Initialize git_diff_patchid_options structure

\n", + "comments": "

Initializes a git_diff_patchid_options with default values. Equivalent to creating an instance with GIT_DIFF_PATCHID_OPTIONS_INIT.

\n", + "group": "diff" }, - "git_ignore_clear_internal_rules": { + "git_diff_patchid": { "type": "function", - "file": "ignore.h", - "line": 52, - "lineto": 53, + "file": "git2/diff.h", + "line": 1502, + "lineto": 1502, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "The repository to remove ignore rules from." + "name": "out", + "type": "git_oid *", + "comment": "Pointer where the calculated patch ID should be stored" + }, + { + "name": "diff", + "type": "git_diff *", + "comment": "The diff to calculate the ID for" + }, + { + "name": "opts", + "type": "git_diff_patchid_options *", + "comment": "Options for how to calculate the patch ID. This is\n intended for future changes, as currently no options are\n available." } ], - "argline": "git_repository *repo", - "sig": "git_repository *", + "argline": "git_oid *out, git_diff *diff, git_diff_patchid_options *opts", + "sig": "git_oid *::git_diff *::git_diff_patchid_options *", "return": { "type": "int", - "comment": " 0 on success" + "comment": " 0 on success, an error code otherwise." }, - "description": "

Clear ignore rules that were explicitly added.

\n", - "comments": "

Resets to the default internal ignore rules. This will not turn off rules in .gitignore files that actually exist in the filesystem.

\n\n

The default internal ignores ignore ".", ".." and ".git" entries.

\n", - "group": "ignore" + "description": "

Calculate the patch ID for the given patch.

\n", + "comments": "

Calculate a stable patch ID for the given patch by summing the hash of the file diffs, ignoring whitespace and line numbers. This can be used to derive whether two diffs are the same with a high probability.

\n\n

Currently, this function only calculates stable patch IDs, as defined in git-patch-id(1), and should in fact generate the same IDs as the upstream git project does.

\n", + "group": "diff" }, - "git_ignore_path_is_ignored": { + "git_email_create_from_commit": { "type": "function", - "file": "ignore.h", - "line": 71, - "lineto": 74, + "file": "git2/email.h", + "line": 99, + "lineto": 102, "args": [ { - "name": "ignored", - "type": "int *", - "comment": "boolean returning 0 if the file is not ignored, 1 if it is" + "name": "out", + "type": "git_buf *", + "comment": "buffer to store the e-mail patch in" }, { - "name": "repo", - "type": "git_repository *", - "comment": "a repository object" + "name": "commit", + "type": "git_commit *", + "comment": "commit to create a patch for" }, { - "name": "path", - "type": "const char *", - "comment": "the file to check ignores for, relative to the repo's workdir." + "name": "opts", + "type": "const git_email_create_options *", + "comment": "email creation options" } ], - "argline": "int *ignored, git_repository *repo, const char *path", - "sig": "int *::git_repository *::const char *", + "argline": "git_buf *out, git_commit *commit, const git_email_create_options *opts", + "sig": "git_buf *::git_commit *::const git_email_create_options *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a diff for a commit in mbox format for sending via email.\n The commit must not be a merge commit.

\n", + "comments": "", + "group": "email" + }, + "git_error_last": { + "type": "function", + "file": "git2/errors.h", + "line": 149, + "lineto": 149, + "args": [], + "argline": "", + "sig": "", "return": { - "type": "int", - "comment": " 0 if ignore rules could be processed for the file (regardless\n of whether it exists or not), or an error \n<\n 0 if they could not." + "type": "const git_error *", + "comment": " A pointer to a `git_error` object that describes the error." }, - "description": "

Test if the ignore rules apply to a given path.

\n", - "comments": "

This function checks the ignore rules to see if they would apply to the given file. This indicates if the file would be ignored regardless of whether the file is already in the index or committed to the repository.

\n\n

One way to think of this is if you were to do "git add ." on the directory containing the file, would it be added or not?

\n", - "group": "ignore" + "description": "

Return the last git_error object that was generated for the\n current thread.

\n", + "comments": "

This function will never return NULL.

\n\n

Callers should not rely on this to determine whether an error has occurred. For error checking, callers should examine the return codes of libgit2 functions.

\n\n

This call can only reliably report error messages when an error has occurred. (It may contain stale information if it is called after a different function that succeeds.)

\n\n

The memory for this object is managed by libgit2. It should not be freed.

\n", + "group": "error", + "examples": { + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_error_last-11", + "ex/v1.9.1/checkout.html#git_error_last-12", + "ex/v1.9.1/checkout.html#git_error_last-13", + "ex/v1.9.1/checkout.html#git_error_last-14" + ], + "commit.c": ["ex/v1.9.1/commit.html#git_error_last-2"], + "config.c": [ + "ex/v1.9.1/config.html#git_error_last-6", + "ex/v1.9.1/config.html#git_error_last-7", + "ex/v1.9.1/config.html#git_error_last-8" + ], + "general.c": ["ex/v1.9.1/general.html#git_error_last-33"], + "merge.c": [ + "ex/v1.9.1/merge.html#git_error_last-8", + "ex/v1.9.1/merge.html#git_error_last-9" + ] + } }, - "git_index_open": { + "git_filter_list_load": { "type": "function", - "file": "index.h", - "line": 203, - "lineto": 203, + "file": "git2/filter.h", + "line": 138, + "lineto": 144, "args": [ { - "name": "out", - "type": "git_index **", - "comment": "the pointer for the new index" + "name": "filters", + "type": "git_filter_list **", + "comment": "Output newly created git_filter_list (or NULL)" }, { - "name": "index_path", + "name": "repo", + "type": "git_repository *", + "comment": "Repository object that contains `path`" + }, + { + "name": "blob", + "type": "git_blob *", + "comment": "The blob to which the filter will be applied (if known)" + }, + { + "name": "path", "type": "const char *", - "comment": "the path to the index file in disk" + "comment": "Relative path of the file to be filtered" + }, + { + "name": "mode", + "type": "git_filter_mode_t", + "comment": "Filtering direction (WT->ODB or ODB->WT)" + }, + { + "name": "flags", + "type": "uint32_t", + "comment": "Combination of `git_filter_flag_t` flags" } ], - "argline": "git_index **out, const char *index_path", - "sig": "git_index **::const char *", + "argline": "git_filter_list **filters, git_repository *repo, git_blob *blob, const char *path, git_filter_mode_t mode, uint32_t flags", + "sig": "git_filter_list **::git_repository *::git_blob *::const char 
*::git_filter_mode_t::uint32_t", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 0 on success (which could still return NULL if no filters are\n needed for the requested file), \n<\n0 on error" }, - "description": "

Create a new bare Git index object as a memory representation\n of the Git index file in 'index_path', without a repository\n to back it.

\n", - "comments": "

Since there is no ODB or working directory behind this index, any Index methods which rely on these (e.g. index_add_bypath) will fail with the GIT_ERROR error code.

\n\n

If you need to access the index of an actual repository, use the git_repository_index wrapper.

\n\n

The index must be freed once it's no longer in use.

\n", - "group": "index" + "description": "

Load the filter list for a given path.

\n", + "comments": "

This will return 0 (success) but set the output git_filter_list to NULL if no filters are requested for the given file.

\n", + "group": "filter" }, - "git_index_new": { + "git_filter_list_load_ext": { "type": "function", - "file": "index.h", - "line": 216, - "lineto": 216, + "file": "git2/filter.h", + "line": 161, + "lineto": 167, "args": [ { - "name": "out", - "type": "git_index **", - "comment": "the pointer for the new index" + "name": "filters", + "type": "git_filter_list **", + "comment": "Output newly created git_filter_list (or NULL)" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "Repository object that contains `path`" + }, + { + "name": "blob", + "type": "git_blob *", + "comment": "The blob to which the filter will be applied (if known)" + }, + { + "name": "path", + "type": "const char *", + "comment": "Relative path of the file to be filtered" + }, + { + "name": "mode", + "type": "git_filter_mode_t", + "comment": "Filtering direction (WT->ODB or ODB->WT)" + }, + { + "name": "opts", + "type": "git_filter_options *", + "comment": "The `git_filter_options` to use when loading filters" } ], - "argline": "git_index **out", - "sig": "git_index **", + "argline": "git_filter_list **filters, git_repository *repo, git_blob *blob, const char *path, git_filter_mode_t mode, git_filter_options *opts", + "sig": "git_filter_list **::git_repository *::git_blob *::const char *::git_filter_mode_t::git_filter_options *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 0 on success (which could still return NULL if no filters are\n needed for the requested file), \n<\n0 on error" }, - "description": "

Create an in-memory index object.

\n", - "comments": "

This index object cannot be read/written to the filesystem, but may be used to perform in-memory index operations.

\n\n

The index must be freed once it's no longer in use.

\n", - "group": "index" + "description": "

Load the filter list for a given path.

\n", + "comments": "

This will return 0 (success) but set the output git_filter_list to NULL if no filters are requested for the given file.

\n", + "group": "filter" }, - "git_index_free": { + "git_filter_list_contains": { "type": "function", - "file": "index.h", - "line": 223, - "lineto": 223, + "file": "git2/filter.h", + "line": 181, + "lineto": 183, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "filters", + "type": "git_filter_list *", + "comment": "A loaded git_filter_list (or NULL)" + }, + { + "name": "name", + "type": "const char *", + "comment": "The name of the filter to query" } ], - "argline": "git_index *index", - "sig": "git_index *", + "argline": "git_filter_list *filters, const char *name", + "sig": "git_filter_list *::const char *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 1 if the filter is in the list, 0 otherwise" }, - "description": "

Free an existing index object.

\n", - "comments": "", - "group": "index", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_index_free-35" - ], - "init.c": [ - "ex/HEAD/init.html#git_index_free-4" - ] - } + "description": "

Query the filter list to see if a given filter (by name) will run.\n The built-in filters "crlf" and "ident" can be queried, otherwise this\n is the name of the filter specified by the filter attribute.

\n", + "comments": "

This will return 0 if the given filter is not in the list, or 1 if the filter will be applied.

\n", + "group": "filter" }, - "git_index_owner": { + "git_filter_list_apply_to_buffer": { "type": "function", - "file": "index.h", - "line": 231, - "lineto": 231, + "file": "git2/filter.h", + "line": 194, + "lineto": 198, "args": [ { - "name": "index", - "type": "const git_index *", - "comment": "The index" + "name": "out", + "type": "git_buf *", + "comment": "Buffer to store the result of the filtering" + }, + { + "name": "filters", + "type": "git_filter_list *", + "comment": "A loaded git_filter_list (or NULL)" + }, + { + "name": "in", + "type": "const char *", + "comment": "Buffer containing the data to filter" + }, + { + "name": "in_len", + "type": "size_t", + "comment": "The length of the input buffer" } ], - "argline": "const git_index *index", - "sig": "const git_index *", + "argline": "git_buf *out, git_filter_list *filters, const char *in, size_t in_len", + "sig": "git_buf *::git_filter_list *::const char *::size_t", "return": { - "type": "git_repository *", - "comment": " A pointer to the repository" + "type": "int", + "comment": " 0 on success, an error code otherwise" }, - "description": "

Get the repository this index relates to

\n", + "description": "

Apply filter list to a data buffer.

\n", "comments": "", - "group": "index" + "group": "filter" }, - "git_index_caps": { + "git_filter_list_apply_to_file": { "type": "function", - "file": "index.h", - "line": 239, - "lineto": 239, + "file": "git2/filter.h", + "line": 210, + "lineto": 214, "args": [ { - "name": "index", - "type": "const git_index *", - "comment": "An existing index object" + "name": "out", + "type": "git_buf *", + "comment": "buffer into which to store the filtered file" + }, + { + "name": "filters", + "type": "git_filter_list *", + "comment": "the list of filters to apply" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to perform the filtering" + }, + { + "name": "path", + "type": "const char *", + "comment": "the path of the file to filter, a relative path will be\n taken as relative to the workdir" } ], - "argline": "const git_index *index", - "sig": "const git_index *", - "return": { - "type": "int", - "comment": " A combination of GIT_INDEXCAP values" - }, - "description": "

Read index capabilities flags.

\n", + "argline": "git_buf *out, git_filter_list *filters, git_repository *repo, const char *path", + "sig": "git_buf *::git_filter_list *::git_repository *::const char *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Apply a filter list to the contents of a file on disk

\n", "comments": "", - "group": "index" + "group": "filter" }, - "git_index_set_caps": { + "git_filter_list_apply_to_blob": { "type": "function", - "file": "index.h", - "line": 252, - "lineto": 252, + "file": "git2/filter.h", + "line": 224, + "lineto": 227, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "An existing index object" + "name": "out", + "type": "git_buf *", + "comment": "buffer into which to store the filtered file" }, { - "name": "caps", - "type": "int", - "comment": "A combination of GIT_INDEXCAP values" + "name": "filters", + "type": "git_filter_list *", + "comment": "the list of filters to apply" + }, + { + "name": "blob", + "type": "git_blob *", + "comment": "the blob to filter" } ], - "argline": "git_index *index, int caps", - "sig": "git_index *::int", - "return": { - "type": "int", - "comment": " 0 on success, -1 on failure" - }, - "description": "

Set index capabilities flags.

\n", - "comments": "

If you pass GIT_INDEXCAP_FROM_OWNER for the caps, then the capabilities will be read from the config of the owner object, looking at core.ignorecase, core.filemode, core.symlinks.

\n", - "group": "index" + "argline": "git_buf *out, git_filter_list *filters, git_blob *blob", + "sig": "git_buf *::git_filter_list *::git_blob *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Apply a filter list to the contents of a blob

\n", + "comments": "", + "group": "filter" }, - "git_index_version": { + "git_filter_list_stream_buffer": { "type": "function", - "file": "index.h", - "line": 264, - "lineto": 264, + "file": "git2/filter.h", + "line": 238, + "lineto": 242, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "An existing index object" - } - ], - "argline": "git_index *index", - "sig": "git_index *", - "return": { - "type": "unsigned int", - "comment": " the index version" - }, - "description": "

Get index on-disk version.

\n", - "comments": "

Valid return values are 2, 3, or 4. If 3 is returned, an index with version 2 may be written instead, if the extension data in version 3 is not necessary.

\n", - "group": "index" + "name": "filters", + "type": "git_filter_list *", + "comment": "the list of filters to apply" + }, + { + "name": "buffer", + "type": "const char *", + "comment": "the buffer to filter" + }, + { + "name": "len", + "type": "size_t", + "comment": "the size of the buffer" + }, + { + "name": "target", + "type": "git_writestream *", + "comment": "the stream into which the data will be written" + } + ], + "argline": "git_filter_list *filters, const char *buffer, size_t len, git_writestream *target", + "sig": "git_filter_list *::const char *::size_t::git_writestream *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Apply a filter list to an arbitrary buffer as a stream

\n", + "comments": "", + "group": "filter" }, - "git_index_set_version": { + "git_filter_list_stream_file": { "type": "function", - "file": "index.h", - "line": 277, - "lineto": 277, + "file": "git2/filter.h", + "line": 254, + "lineto": 258, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "An existing index object" + "name": "filters", + "type": "git_filter_list *", + "comment": "the list of filters to apply" }, { - "name": "version", - "type": "unsigned int", - "comment": "The new version number" + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to perform the filtering" + }, + { + "name": "path", + "type": "const char *", + "comment": "the path of the file to filter, a relative path will be\n taken as relative to the workdir" + }, + { + "name": "target", + "type": "git_writestream *", + "comment": "the stream into which the data will be written" } ], - "argline": "git_index *index, unsigned int version", - "sig": "git_index *::unsigned int", - "return": { - "type": "int", - "comment": " 0 on success, -1 on failure" - }, - "description": "

Set index on-disk version.

\n", - "comments": "

Valid values are 2, 3, or 4. If 2 is given, git_index_write may write an index with version 3 instead, if necessary to accurately represent the index.

\n", - "group": "index" + "argline": "git_filter_list *filters, git_repository *repo, const char *path, git_writestream *target", + "sig": "git_filter_list *::git_repository *::const char *::git_writestream *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Apply a filter list to a file as a stream

\n", + "comments": "", + "group": "filter" }, - "git_index_read": { + "git_filter_list_stream_blob": { "type": "function", - "file": "index.h", - "line": 296, - "lineto": 296, + "file": "git2/filter.h", + "line": 268, + "lineto": 271, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "filters", + "type": "git_filter_list *", + "comment": "the list of filters to apply" }, { - "name": "force", - "type": "int", - "comment": "if true, always reload, vs. only read if file has changed" + "name": "blob", + "type": "git_blob *", + "comment": "the blob to filter" + }, + { + "name": "target", + "type": "git_writestream *", + "comment": "the stream into which the data will be written" } ], - "argline": "git_index *index, int force", - "sig": "git_index *::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Update the contents of an existing index object in memory by reading\n from the hard disk.

\n", - "comments": "

If force is true, this performs a "hard" read that discards in-memory changes and always reloads the on-disk index data. If there is no on-disk version, the index will be cleared.

\n\n

If force is false, this does a "soft" read that reloads the index data from disk only if it has changed since the last time it was loaded. Purely in-memory index data will be untouched. Be aware: if there are changes on disk, unwritten in-memory changes are discarded.

\n", - "group": "index" + "argline": "git_filter_list *filters, git_blob *blob, git_writestream *target", + "sig": "git_filter_list *::git_blob *::git_writestream *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Apply a filter list to a blob as a stream

\n", + "comments": "", + "group": "filter" }, - "git_index_write": { + "git_filter_list_free": { "type": "function", - "file": "index.h", - "line": 305, - "lineto": 305, + "file": "git2/filter.h", + "line": 278, + "lineto": 278, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "filters", + "type": "git_filter_list *", + "comment": "A git_filter_list created by `git_filter_list_load`" } ], - "argline": "git_index *index", - "sig": "git_index *", + "argline": "git_filter_list *filters", + "sig": "git_filter_list *", + "return": { "type": "void", "comment": null }, + "description": "

Free a git_filter_list

\n", + "comments": "", + "group": "filter" + }, + "git_libgit2_init": { + "type": "function", + "file": "git2/global.h", + "line": 32, + "lineto": 32, + "args": [], + "argline": "", + "sig": "", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " the number of initializations of the library, or an error code." }, - "description": "

Write an existing index object from memory back to disk\n using an atomic file lock.

\n", - "comments": "", - "group": "index" + "description": "

Init the global state

\n", + "comments": "

This function must be called before any other libgit2 function in order to set up global state and threading.

\n\n

This function may be called multiple times - it will return the number of times the initialization has been called (including this one) that have not subsequently been shutdown.

\n", + "group": "libgit2", + "examples": { + "general.c": ["ex/v1.9.1/general.html#git_libgit2_init-34"] + } }, - "git_index_path": { + "git_libgit2_shutdown": { "type": "function", - "file": "index.h", - "line": 313, - "lineto": 313, - "args": [ - { - "name": "index", - "type": "const git_index *", - "comment": "an existing index object" - } - ], - "argline": "const git_index *index", - "sig": "const git_index *", + "file": "git2/global.h", + "line": 45, + "lineto": 45, + "args": [], + "argline": "", + "sig": "", "return": { - "type": "const char *", - "comment": " path to index file or NULL for in-memory index" + "type": "int", + "comment": " the number of remaining initializations of the library, or an\n error code." }, - "description": "

Get the full path to the index file on disk.

\n", - "comments": "", - "group": "index" + "description": "

Shutdown the global state

\n", + "comments": "

Clean up the global state and threading context after calling it as many times as git_libgit2_init() was called - it will return the number of remainining initializations that have not been shutdown (after this one).

\n", + "group": "libgit2" }, - "git_index_checksum": { + "git_graph_ahead_behind": { "type": "function", - "file": "index.h", - "line": 325, - "lineto": 325, + "file": "git2/graph.h", + "line": 38, + "lineto": 38, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "ahead", + "type": "size_t *", + "comment": "number of unique from commits in `upstream`" + }, + { + "name": "behind", + "type": "size_t *", + "comment": "number of unique from commits in `local`" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the commits exist" + }, + { + "name": "local", + "type": "const git_oid *", + "comment": "the commit for local" + }, + { + "name": "upstream", + "type": "const git_oid *", + "comment": "the commit for upstream" } ], - "argline": "git_index *index", - "sig": "git_index *", - "return": { - "type": "const git_oid *", - "comment": " a pointer to the checksum of the index" - }, - "description": "

Get the checksum of the index

\n", - "comments": "

This checksum is the SHA-1 hash over the index file (except the last 20 bytes which are the checksum itself). In cases where the index does not exist on-disk, it will be zeroed out.

\n", - "group": "index" + "argline": "size_t *ahead, size_t *behind, git_repository *repo, const git_oid *local, const git_oid *upstream", + "sig": "size_t *::size_t *::git_repository *::const git_oid *::const git_oid *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Count the number of unique commits between two commit objects

\n", + "comments": "

There is no need for branches containing the commits to have any upstream relationship, but it helps to think of one as a branch and the other as its upstream, the ahead and behind values will be what git would report for the branches.

\n", + "group": "graph" }, - "git_index_read_tree": { + "git_graph_descendant_of": { "type": "function", - "file": "index.h", - "line": 336, - "lineto": 336, + "file": "git2/graph.h", + "line": 53, + "lineto": 56, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the commits exist" }, { - "name": "tree", - "type": "const git_tree *", - "comment": "tree to read" + "name": "commit", + "type": "const git_oid *", + "comment": "a previously loaded commit" + }, + { + "name": "ancestor", + "type": "const git_oid *", + "comment": "a potential ancestor commit" } ], - "argline": "git_index *index, const git_tree *tree", - "sig": "git_index *::const git_tree *", + "argline": "git_repository *repo, const git_oid *commit, const git_oid *ancestor", + "sig": "git_repository *::const git_oid *::const git_oid *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 1 if the given commit is a descendant of the potential ancestor,\n 0 if not, error code otherwise." }, - "description": "

Read a tree into the index file with stats

\n", - "comments": "

The current index contents will be replaced by the specified tree.

\n", - "group": "index" + "description": "

Determine if a commit is the descendant of another commit.

\n", + "comments": "

Note that a commit is not considered a descendant of itself, in contrast to git merge-base --is-ancestor.

\n", + "group": "graph" }, - "git_index_write_tree": { + "git_graph_reachable_from_any": { "type": "function", - "file": "index.h", - "line": 357, - "lineto": 357, + "file": "git2/graph.h", + "line": 69, + "lineto": 73, "args": [ { - "name": "out", - "type": "git_oid *", - "comment": "Pointer where to store the OID of the written tree" + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the commits exist" }, { - "name": "index", - "type": "git_index *", - "comment": "Index to write" - } + "name": "commit", + "type": "const git_oid *", + "comment": "a previously loaded commit" + }, + { + "name": "descendant_array", + "type": "const git_oid []", + "comment": "oids of the commits" + }, + { + "name": "length", + "type": "size_t", + "comment": "the number of commits in the provided `descendant_array`" + } ], - "argline": "git_oid *out, git_index *index", - "sig": "git_oid *::git_index *", + "argline": "git_repository *repo, const git_oid *commit, const git_oid [] descendant_array, size_t length", + "sig": "git_repository *::const git_oid *::const git_oid []::size_t", "return": { "type": "int", - "comment": " 0 on success, GIT_EUNMERGED when the index is not clean\n or an error code" + "comment": " 1 if the given commit is an ancestor of any of the given potential\n descendants, 0 if not, error code otherwise." }, - "description": "

Write the index as a tree

\n", - "comments": "

This method will scan the index and write a representation of its current state back to disk; it recursively creates tree objects for each of the subtrees stored in the index, but only returns the OID of the root tree. This is the OID that can be used e.g. to create a commit.

\n\n

The index instance cannot be bare, and needs to be associated to an existing repository.

\n\n

The index must not contain any file in conflict.

\n", - "group": "index", - "examples": { - "init.c": [ - "ex/HEAD/init.html#git_index_write_tree-5" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_index_write_tree-14" - ] - } + "description": "

Determine if a commit is reachable from any of a list of commits by\n following parent edges.

\n", + "comments": "", + "group": "graph" }, - "git_index_write_tree_to": { + "git_ignore_add_rule": { "type": "function", - "file": "index.h", - "line": 374, - "lineto": 374, + "file": "git2/ignore.h", + "line": 46, + "lineto": 48, "args": [ { - "name": "out", - "type": "git_oid *", - "comment": "Pointer where to store OID of the the written tree" + "name": "repo", + "type": "git_repository *", + "comment": "The repository to add ignore rules to." }, { - "name": "index", - "type": "git_index *", - "comment": "Index to write" + "name": "rules", + "type": "const char *", + "comment": "Text of rules, the contents to add on a .gitignore file.\n It is okay to have multiple rules in the text; if so,\n each rule should be terminated with a newline." + } + ], + "argline": "git_repository *repo, const char *rules", + "sig": "git_repository *::const char *", + "return": { "type": "int", "comment": " 0 on success" }, + "description": "

Add ignore rules for a repository.

\n", + "comments": "

Excludesfile rules (i.e. .gitignore rules) are generally read from .gitignore files in the repository tree or from a shared system file only if a "core.excludesfile" config value is set. The library also keeps a set of per-repository internal ignores that can be configured in-memory and will not persist. This function allows you to add to that internal rules list.

\n\n

Example usage:

\n\n
 error = git_ignore_add_rule(myrepo, "*.c/ with space");\n
\n\n

This would add three rules to the ignores.

\n", + "group": "ignore" + }, + "git_ignore_clear_internal_rules": { + "type": "function", + "file": "git2/ignore.h", + "line": 61, + "lineto": 62, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository to remove ignore rules from." + } + ], + "argline": "git_repository *repo", + "sig": "git_repository *", + "return": { "type": "int", "comment": " 0 on success" }, + "description": "

Clear ignore rules that were explicitly added.

\n", + "comments": "

Resets to the default internal ignore rules. This will not turn off rules in .gitignore files that actually exist in the filesystem.

\n\n

The default internal ignores ignore ".", ".." and ".git" entries.

\n", + "group": "ignore" + }, + "git_ignore_path_is_ignored": { + "type": "function", + "file": "git2/ignore.h", + "line": 80, + "lineto": 83, + "args": [ + { + "name": "ignored", + "type": "int *", + "comment": "boolean returning 0 if the file is not ignored, 1 if it is" }, { "name": "repo", "type": "git_repository *", - "comment": "Repository where to write the tree" + "comment": "a repository object" + }, + { + "name": "path", + "type": "const char *", + "comment": "the file to check ignores for, relative to the repo's workdir." } ], - "argline": "git_oid *out, git_index *index, git_repository *repo", - "sig": "git_oid *::git_index *::git_repository *", + "argline": "int *ignored, git_repository *repo, const char *path", + "sig": "int *::git_repository *::const char *", "return": { "type": "int", - "comment": " 0 on success, GIT_EUNMERGED when the index is not clean\n or an error code" + "comment": " 0 if ignore rules could be processed for the file (regardless\n of whether it exists or not), or an error \n<\n 0 if they could not." }, - "description": "

Write the index as a tree to the given repository

\n", - "comments": "

This method will do the same as git_index_write_tree, but letting the user choose the repository where the tree will be written.

\n\n

The index must not contain any file in conflict.

\n", - "group": "index" + "description": "

Test if the ignore rules apply to a given path.

\n", + "comments": "

This function checks the ignore rules to see if they would apply to the given file. This indicates if the file would be ignored regardless of whether the file is already in the index or committed to the repository.

\n\n

One way to think of this is if you were to do "git check-ignore --no-index" on the given file, would it be shown or not?

\n", + "group": "ignore" }, - "git_index_entrycount": { + "git_index_open": { "type": "function", - "file": "index.h", - "line": 393, - "lineto": 393, + "file": "git2/index.h", + "line": 278, + "lineto": 278, "args": [ { - "name": "index", - "type": "const git_index *", - "comment": "an existing index object" + "name": "index_out", + "type": "git_index **", + "comment": "the pointer for the new index" + }, + { + "name": "index_path", + "type": "const char *", + "comment": "the path to the index file in disk" } ], - "argline": "const git_index *index", - "sig": "const git_index *", - "return": { - "type": "size_t", - "comment": " integer of count of current entries" - }, - "description": "

Get the count of entries currently in the index

\n", - "comments": "", - "group": "index", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_index_entrycount-36" - ] - } + "argline": "git_index **index_out, const char *index_path", + "sig": "git_index **::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a new bare Git index object as a memory representation\n of the Git index file in 'index_path', without a repository\n to back it.

\n", + "comments": "

Since there is no ODB or working directory behind this index, any Index methods which rely on these (e.g. index_add_bypath) will fail with the GIT_ERROR error code.

\n\n

If you need to access the index of an actual repository, use the git_repository_index wrapper.

\n\n

The index must be freed once it's no longer in use.

\n", + "group": "index" }, - "git_index_clear": { + "git_index_new": { "type": "function", - "file": "index.h", - "line": 404, - "lineto": 404, + "file": "git2/index.h", + "line": 291, + "lineto": 291, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" + "name": "index_out", + "type": "git_index **", + "comment": "the pointer for the new index" } ], - "argline": "git_index *index", - "sig": "git_index *", - "return": { - "type": "int", - "comment": " 0 on success, error code \n<\n 0 on failure" - }, - "description": "

Clear the contents (all the entries) of an index object.

\n", - "comments": "

This clears the index object in memory; changes must be explicitly written to disk for them to take effect persistently.

\n", + "argline": "git_index **index_out", + "sig": "git_index **", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create an in-memory index object.

\n", + "comments": "

This index object cannot be read/written to the filesystem, but may be used to perform in-memory index operations.

\n\n

The index must be freed once it's no longer in use.

\n", "group": "index" }, - "git_index_get_byindex": { + "git_index_free": { "type": "function", - "file": "index.h", - "line": 417, - "lineto": 418, + "file": "git2/index.h", + "line": 300, + "lineto": 300, "args": [ { "name": "index", "type": "git_index *", "comment": "an existing index object" - }, - { - "name": "n", - "type": "size_t", - "comment": "the position of the entry" } ], - "argline": "git_index *index, size_t n", - "sig": "git_index *::size_t", - "return": { - "type": "const git_index_entry *", - "comment": " a pointer to the entry; NULL if out of bounds" - }, - "description": "

Get a pointer to one of the entries in the index

\n", - "comments": "

The entry is not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "argline": "git_index *index", + "sig": "git_index *", + "return": { "type": "void", "comment": null }, + "description": "

Free an existing index object.

\n", + "comments": "", "group": "index", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_index_get_byindex-37" - ] + "add.c": ["ex/v1.9.1/add.html#git_index_free-1"], + "commit.c": ["ex/v1.9.1/commit.html#git_index_free-3"], + "general.c": ["ex/v1.9.1/general.html#git_index_free-35"], + "init.c": ["ex/v1.9.1/init.html#git_index_free-2"], + "ls-files.c": ["ex/v1.9.1/ls-files.html#git_index_free-1"] } }, - "git_index_get_bypath": { + "git_index_owner": { "type": "function", - "file": "index.h", - "line": 432, - "lineto": 433, + "file": "git2/index.h", + "line": 308, + "lineto": 308, "args": [ - { - "name": "index", - "type": "git_index *", - "comment": "an existing index object" - }, - { - "name": "path", - "type": "const char *", - "comment": "path to search" - }, - { - "name": "stage", - "type": "int", - "comment": "stage to search" - } + { "name": "index", "type": "const git_index *", "comment": "The index" } ], - "argline": "git_index *index, const char *path, int stage", - "sig": "git_index *::const char *::int", + "argline": "const git_index *index", + "sig": "const git_index *", "return": { - "type": "const git_index_entry *", - "comment": " a pointer to the entry; NULL if it was not found" + "type": "git_repository *", + "comment": " A pointer to the repository" }, - "description": "

Get a pointer to one of the entries in the index

\n", - "comments": "

The entry is not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "description": "

Get the repository this index relates to

\n", + "comments": "", "group": "index" }, - "git_index_remove": { + "git_index_caps": { "type": "function", - "file": "index.h", - "line": 443, - "lineto": 443, + "file": "git2/index.h", + "line": 316, + "lineto": 316, "args": [ { "name": "index", - "type": "git_index *", - "comment": "an existing index object" - }, - { - "name": "path", - "type": "const char *", - "comment": "path to search" - }, - { - "name": "stage", - "type": "int", - "comment": "stage to search" + "type": "const git_index *", + "comment": "An existing index object" } ], - "argline": "git_index *index, const char *path, int stage", - "sig": "git_index *::const char *::int", + "argline": "const git_index *index", + "sig": "const git_index *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " A combination of GIT_INDEX_CAPABILITY values" }, - "description": "

Remove an entry from the index

\n", + "description": "

Read index capabilities flags.

\n", "comments": "", "group": "index" }, - "git_index_remove_directory": { + "git_index_set_caps": { "type": "function", - "file": "index.h", - "line": 453, - "lineto": 454, + "file": "git2/index.h", + "line": 329, + "lineto": 329, "args": [ { "name": "index", "type": "git_index *", - "comment": "an existing index object" - }, - { - "name": "dir", - "type": "const char *", - "comment": "container directory path" + "comment": "An existing index object" }, { - "name": "stage", + "name": "caps", "type": "int", - "comment": "stage to search" + "comment": "A combination of GIT_INDEX_CAPABILITY values" } ], - "argline": "git_index *index, const char *dir, int stage", - "sig": "git_index *::const char *::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Remove all entries from the index under a given directory

\n", - "comments": "", + "argline": "git_index *index, int caps", + "sig": "git_index *::int", + "return": { "type": "int", "comment": " 0 on success, -1 on failure" }, + "description": "

Set index capabilities flags.

\n", + "comments": "

If you pass GIT_INDEX_CAPABILITY_FROM_OWNER for the caps, then capabilities will be read from the config of the owner object, looking at core.ignorecase, core.filemode, core.symlinks.

\n", "group": "index" }, - "git_index_add": { + "git_index_version": { "type": "function", - "file": "index.h", - "line": 470, - "lineto": 470, + "file": "git2/index.h", + "line": 341, + "lineto": 341, "args": [ { "name": "index", "type": "git_index *", - "comment": "an existing index object" - }, - { - "name": "source_entry", - "type": "const git_index_entry *", - "comment": "new entry object" + "comment": "An existing index object" } ], - "argline": "git_index *index, const git_index_entry *source_entry", - "sig": "git_index *::const git_index_entry *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Add or update an index entry from an in-memory struct

\n", - "comments": "

If a previous index entry exists that has the same path and stage as the given 'source_entry', it will be replaced. Otherwise, the 'source_entry' will be added.

\n\n

A full copy (including the 'path' string) of the given 'source_entry' will be inserted on the index.

\n", + "argline": "git_index *index", + "sig": "git_index *", + "return": { "type": "unsigned int", "comment": " the index version" }, + "description": "

Get index on-disk version.

\n", + "comments": "

Valid return values are 2, 3, or 4. If 3 is returned, an index with version 2 may be written instead, if the extension data in version 3 is not necessary.

\n", "group": "index" }, - "git_index_entry_stage": { + "git_index_set_version": { "type": "function", - "file": "index.h", - "line": 482, - "lineto": 482, + "file": "git2/index.h", + "line": 354, + "lineto": 354, "args": [ { - "name": "entry", - "type": "const git_index_entry *", - "comment": "The entry" + "name": "index", + "type": "git_index *", + "comment": "An existing index object" + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The new version number" } ], - "argline": "const git_index_entry *entry", - "sig": "const git_index_entry *", - "return": { - "type": "int", - "comment": " the stage number" - }, - "description": "

Return the stage number from a git index entry

\n", - "comments": "

This entry is calculated from the entry's flag attribute like this:

\n\n
(entry->flags & GIT_IDXENTRY_STAGEMASK) >> GIT_IDXENTRY_STAGESHIFT\n
\n", + "argline": "git_index *index, unsigned int version", + "sig": "git_index *::unsigned int", + "return": { "type": "int", "comment": " 0 on success, -1 on failure" }, + "description": "

Set index on-disk version.

\n", + "comments": "

Valid values are 2, 3, or 4. If 2 is given, git_index_write may write an index with version 3 instead, if necessary to accurately represent the index.

\n", "group": "index" }, - "git_index_entry_is_conflict": { + "git_index_read": { "type": "function", - "file": "index.h", - "line": 491, - "lineto": 491, + "file": "git2/index.h", + "line": 373, + "lineto": 373, "args": [ { - "name": "entry", - "type": "const git_index_entry *", - "comment": "The entry" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + }, + { + "name": "force", + "type": "int", + "comment": "if true, always reload, vs. only read if file has changed" } ], - "argline": "const git_index_entry *entry", - "sig": "const git_index_entry *", - "return": { - "type": "int", - "comment": " 1 if the entry is a conflict entry, 0 otherwise" - }, - "description": "

Return whether the given index entry is a conflict (has a high stage\n entry). This is simply shorthand for git_index_entry_stage > 0.

\n", - "comments": "", + "argline": "git_index *index, int force", + "sig": "git_index *::int", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Update the contents of an existing index object in memory by reading\n from the hard disk.

\n", + "comments": "

If force is true, this performs a "hard" read that discards in-memory changes and always reloads the on-disk index data. If there is no on-disk version, the index will be cleared.

\n\n

If force is false, this does a "soft" read that reloads the index data from disk only if it has changed since the last time it was loaded. Purely in-memory index data will be untouched. Be aware: if there are changes on disk, unwritten in-memory changes are discarded.

\n", "group": "index" }, - "git_index_add_bypath": { + "git_index_write": { "type": "function", - "file": "index.h", - "line": 522, - "lineto": 522, + "file": "git2/index.h", + "line": 382, + "lineto": 382, "args": [ { "name": "index", "type": "git_index *", "comment": "an existing index object" - }, - { - "name": "path", - "type": "const char *", - "comment": "filename to add" } ], - "argline": "git_index *index, const char *path", - "sig": "git_index *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Add or update an index entry from a file on disk

\n", - "comments": "

The file path must be relative to the repository's working folder and must be readable.

\n\n

This method will fail in bare index instances.

\n\n

This forces the file to be added to the index, not looking at gitignore rules. Those rules can be evaluated through the git_status APIs (in status.h) before calling this.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", - "group": "index" + "argline": "git_index *index", + "sig": "git_index *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Write an existing index object from memory back to disk\n using an atomic file lock.

\n", + "comments": "", + "group": "index", + "examples": { + "add.c": ["ex/v1.9.1/add.html#git_index_write-2"], + "commit.c": ["ex/v1.9.1/commit.html#git_index_write-4"] + } }, - "git_index_add_frombuffer": { + "git_index_path": { "type": "function", - "file": "index.h", - "line": 551, - "lineto": 554, + "file": "git2/index.h", + "line": 390, + "lineto": 390, "args": [ { "name": "index", - "type": "git_index *", + "type": "const git_index *", "comment": "an existing index object" - }, - { - "name": "entry", - "type": "const git_index_entry *", - "comment": "filename to add" - }, - { - "name": "buffer", - "type": "const void *", - "comment": "data to be written into the blob" - }, - { - "name": "len", - "type": "size_t", - "comment": "length of the data" } ], - "argline": "git_index *index, const git_index_entry *entry, const void *buffer, size_t len", - "sig": "git_index *::const git_index_entry *::const void *::size_t", + "argline": "const git_index *index", + "sig": "const git_index *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const char *", + "comment": " path to index file or NULL for in-memory index" }, - "description": "

Add or update an index entry from a buffer in memory

\n", - "comments": "

This method will create a blob in the repository that owns the index and then add the index entry to the index. The path of the entry represents the position of the blob relative to the repository's root folder.

\n\n

If a previous index entry exists that has the same path as the given 'entry', it will be replaced. Otherwise, the 'entry' will be added. The id and the file_size of the 'entry' are updated with the real value of the blob.

\n\n

This forces the file to be added to the index, not looking at gitignore rules. Those rules can be evaluated through the git_status APIs (in status.h) before calling this.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", + "description": "

Get the full path to the index file on disk.

\n", + "comments": "", "group": "index" }, - "git_index_remove_bypath": { + "git_index_checksum": { "type": "function", - "file": "index.h", - "line": 570, - "lineto": 570, + "file": "git2/index.h", + "line": 404, + "lineto": 404, "args": [ { "name": "index", "type": "git_index *", "comment": "an existing index object" - }, - { - "name": "path", - "type": "const char *", - "comment": "filename to remove" } ], - "argline": "git_index *index, const char *path", - "sig": "git_index *::const char *", + "argline": "git_index *index", + "sig": "git_index *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const git_oid *", + "comment": " a pointer to the checksum of the index" }, - "description": "

Remove an index entry corresponding to a file on disk

\n", - "comments": "

The file path must be relative to the repository's working folder. It may exist.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", + "description": "

Get the checksum of the index

\n", + "comments": "

This checksum is the SHA-1 hash over the index file (except the last 20 bytes which are the checksum itself). In cases where the index does not exist on-disk, it will be zeroed out.

\n", "group": "index" }, - "git_index_add_all": { + "git_index_read_tree": { "type": "function", - "file": "index.h", - "line": 618, - "lineto": 623, + "file": "git2/index.h", + "line": 416, + "lineto": 416, "args": [ { "name": "index", @@ -8462,179 +8855,127 @@ "comment": "an existing index object" }, { - "name": "pathspec", - "type": "const git_strarray *", - "comment": "array of path patterns" - }, - { - "name": "flags", - "type": "unsigned int", - "comment": "combination of git_index_add_option_t flags" - }, - { - "name": "callback", - "type": "git_index_matched_path_cb", - "comment": "notification callback for each added/updated path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." - }, - { - "name": "payload", - "type": "void *", - "comment": "payload passed through to callback function" + "name": "tree", + "type": "const git_tree *", + "comment": "tree to read" } ], - "argline": "git_index *index, const git_strarray *pathspec, unsigned int flags, git_index_matched_path_cb callback, void *payload", - "sig": "git_index *::const git_strarray *::unsigned int::git_index_matched_path_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, negative callback return value, or error code" - }, - "description": "

Add or update index entries matching files in the working directory.

\n", - "comments": "

This method will fail in bare index instances.

\n\n

The pathspec is a list of file names or shell glob patterns that will be matched against files in the repository's working directory. Each file that matches will be added to the index (either updating an existing entry or adding a new entry). You can disable glob expansion and force exact matching with the GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH flag.

\n\n

Files that are ignored will be skipped (unlike git_index_add_bypath). If a file is already tracked in the index, then it will be updated even if it is ignored. Pass the GIT_INDEX_ADD_FORCE flag to skip the checking of ignore rules.

\n\n

To emulate git add -A and generate an error if the pathspec contains the exact path of an ignored file (when not using FORCE), add the GIT_INDEX_ADD_CHECK_PATHSPEC flag. This checks that each entry in the pathspec that is an exact match to a filename on disk is either not ignored or already in the index. If this check fails, the function will return GIT_EINVALIDSPEC.

\n\n

To emulate git add -A with the "dry-run" option, just use a callback function that always returns a positive value. See below for details.

\n\n

If any files are currently the result of a merge conflict, those files will no longer be marked as conflicting. The data about the conflicts will be moved to the "resolve undo" (REUC) section.

\n\n

If you provide a callback function, it will be invoked on each matching item in the working directory immediately before it is added to / updated in the index. Returning zero will add the item to the index, greater than zero will skip the item, and less than zero will abort the scan and return that value to the caller.

\n", + "argline": "git_index *index, const git_tree *tree", + "sig": "git_index *::const git_tree *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Read a tree into the index file with stats

\n", + "comments": "

The current index contents will be replaced by the specified tree.

\n", "group": "index" }, - "git_index_remove_all": { + "git_index_write_tree": { "type": "function", - "file": "index.h", - "line": 640, - "lineto": 644, + "file": "git2/index.h", + "line": 437, + "lineto": 437, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "An existing index object" - }, - { - "name": "pathspec", - "type": "const git_strarray *", - "comment": "array of path patterns" - }, - { - "name": "callback", - "type": "git_index_matched_path_cb", - "comment": "notification callback for each removed path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." + "name": "out", + "type": "git_oid *", + "comment": "Pointer where to store the OID of the written tree" }, - { - "name": "payload", - "type": "void *", - "comment": "payload passed through to callback function" - } + { "name": "index", "type": "git_index *", "comment": "Index to write" } ], - "argline": "git_index *index, const git_strarray *pathspec, git_index_matched_path_cb callback, void *payload", - "sig": "git_index *::const git_strarray *::git_index_matched_path_cb::void *", + "argline": "git_oid *out, git_index *index", + "sig": "git_oid *::git_index *", "return": { "type": "int", - "comment": " 0 on success, negative callback return value, or error code" + "comment": " 0 on success, GIT_EUNMERGED when the index is not clean\n or an error code" }, - "description": "

Remove all matching index entries.

\n", - "comments": "

If you provide a callback function, it will be invoked on each matching item in the index immediately before it is removed. Return 0 to remove the item, > 0 to skip the item, and < 0 to abort the scan.

\n", - "group": "index" + "description": "

Write the index as a tree

\n", + "comments": "

This method will scan the index and write a representation of its current state back to disk; it recursively creates tree objects for each of the subtrees stored in the index, but only returns the OID of the root tree. This is the OID that can be used e.g. to create a commit.

\n\n

The index instance cannot be bare, and needs to be associated to an existing repository.

\n\n

The index must not contain any file in conflict.

\n", + "group": "index", + "examples": { + "commit.c": ["ex/v1.9.1/commit.html#git_index_write_tree-5"], + "init.c": ["ex/v1.9.1/init.html#git_index_write_tree-3"], + "merge.c": ["ex/v1.9.1/merge.html#git_index_write_tree-10"] + } }, - "git_index_update_all": { + "git_index_write_tree_to": { "type": "function", - "file": "index.h", - "line": 669, - "lineto": 673, + "file": "git2/index.h", + "line": 454, + "lineto": 454, "args": [ { - "name": "index", - "type": "git_index *", - "comment": "An existing index object" - }, - { - "name": "pathspec", - "type": "const git_strarray *", - "comment": "array of path patterns" - }, - { - "name": "callback", - "type": "git_index_matched_path_cb", - "comment": "notification callback for each updated path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." + "name": "out", + "type": "git_oid *", + "comment": "Pointer where to store OID of the written tree" }, + { "name": "index", "type": "git_index *", "comment": "Index to write" }, { - "name": "payload", - "type": "void *", - "comment": "payload passed through to callback function" + "name": "repo", + "type": "git_repository *", + "comment": "Repository where to write the tree" } ], - "argline": "git_index *index, const git_strarray *pathspec, git_index_matched_path_cb callback, void *payload", - "sig": "git_index *::const git_strarray *::git_index_matched_path_cb::void *", + "argline": "git_oid *out, git_index *index, git_repository *repo", + "sig": "git_oid *::git_index *::git_repository *", "return": { "type": "int", - "comment": " 0 on success, negative callback return value, or error code" + "comment": " 0 on success, GIT_EUNMERGED when the index is not clean\n or an error code" }, - "description": "

Update all index entries to match the working directory

\n", - "comments": "

This method will fail in bare index instances.

\n\n

This scans the existing index entries and synchronizes them with the working directory, deleting them if the corresponding working directory file no longer exists otherwise updating the information (including adding the latest version of file to the ODB if needed).

\n\n

If you provide a callback function, it will be invoked on each matching item in the index immediately before it is updated (either refreshed or removed depending on working directory state). Return 0 to proceed with updating the item, > 0 to skip the item, and < 0 to abort the scan.

\n", + "description": "

Write the index as a tree to the given repository

\n", + "comments": "

This method will do the same as git_index_write_tree, but letting the user choose the repository where the tree will be written.

\n\n

The index must not contain any file in conflict.

\n", "group": "index" }, - "git_index_find": { + "git_index_entrycount": { "type": "function", - "file": "index.h", - "line": 684, - "lineto": 684, + "file": "git2/index.h", + "line": 473, + "lineto": 473, "args": [ - { - "name": "at_pos", - "type": "size_t *", - "comment": "the address to which the position of the index entry is written (optional)" - }, { "name": "index", - "type": "git_index *", + "type": "const git_index *", "comment": "an existing index object" - }, - { - "name": "path", - "type": "const char *", - "comment": "path to search" } ], - "argline": "size_t *at_pos, git_index *index, const char *path", - "sig": "size_t *::git_index *::const char *", + "argline": "const git_index *index", + "sig": "const git_index *", "return": { - "type": "int", - "comment": " a zero-based position in the index if found; GIT_ENOTFOUND otherwise" + "type": "size_t", + "comment": " integer of count of current entries" }, - "description": "

Find the first position of any entries which point to given\n path in the Git index.

\n", + "description": "

Get the count of entries currently in the index

\n", "comments": "", - "group": "index" + "group": "index", + "examples": { + "general.c": ["ex/v1.9.1/general.html#git_index_entrycount-36"], + "ls-files.c": ["ex/v1.9.1/ls-files.html#git_index_entrycount-2"] + } }, - "git_index_find_prefix": { + "git_index_clear": { "type": "function", - "file": "index.h", - "line": 695, - "lineto": 695, + "file": "git2/index.h", + "line": 484, + "lineto": 484, "args": [ - { - "name": "at_pos", - "type": "size_t *", - "comment": "the address to which the position of the index entry is written (optional)" - }, { "name": "index", "type": "git_index *", "comment": "an existing index object" - }, - { - "name": "prefix", - "type": "const char *", - "comment": "the prefix to search for" } ], - "argline": "size_t *at_pos, git_index *index, const char *prefix", - "sig": "size_t *::git_index *::const char *", + "argline": "git_index *index", + "sig": "git_index *", "return": { "type": "int", - "comment": " 0 with valid value in at_pos; an error code otherwise" + "comment": " 0 on success, error code \n<\n 0 on failure" }, - "description": "

Find the first position of any entries matching a prefix. To find the first position\n of a path inside a given folder, suffix the prefix with a '/'.

\n", - "comments": "", + "description": "

Clear the contents (all the entries) of an index object.

\n", + "comments": "

This clears the index object in memory; changes must be explicitly written to disk for them to take effect persistently.

\n", "group": "index" }, - "git_index_conflict_add": { + "git_index_get_byindex": { "type": "function", - "file": "index.h", - "line": 720, - "lineto": 724, + "file": "git2/index.h", + "line": 497, + "lineto": 498, "args": [ { "name": "index", @@ -8642,78 +8983,78 @@ "comment": "an existing index object" }, { - "name": "ancestor_entry", - "type": "const git_index_entry *", - "comment": "the entry data for the ancestor of the conflict" - }, - { - "name": "our_entry", - "type": "const git_index_entry *", - "comment": "the entry data for our side of the merge conflict" - }, - { - "name": "their_entry", - "type": "const git_index_entry *", - "comment": "the entry data for their side of the merge conflict" + "name": "n", + "type": "size_t", + "comment": "the position of the entry" } ], - "argline": "git_index *index, const git_index_entry *ancestor_entry, const git_index_entry *our_entry, const git_index_entry *their_entry", - "sig": "git_index *::const git_index_entry *::const git_index_entry *::const git_index_entry *", + "argline": "git_index *index, size_t n", + "sig": "git_index *::size_t", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const git_index_entry *", + "comment": " a pointer to the entry; NULL if out of bounds" }, - "description": "

Add or update index entries to represent a conflict. Any staged\n entries that exist at the given paths will be removed.

\n", - "comments": "

The entries are the entries from the tree included in the merge. Any entry may be null to indicate that that file was not present in the trees during the merge. For example, ancestor_entry may be NULL to indicate that a file was added in both branches and must be resolved.

\n", - "group": "index" + "description": "

Get a pointer to one of the entries in the index

\n", + "comments": "

The entry is not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "group": "index", + "examples": { + "general.c": ["ex/v1.9.1/general.html#git_index_get_byindex-37"], + "ls-files.c": ["ex/v1.9.1/ls-files.html#git_index_get_byindex-3"] + } }, - "git_index_conflict_get": { + "git_index_get_bypath": { "type": "function", - "file": "index.h", - "line": 740, - "lineto": 745, + "file": "git2/index.h", + "line": 512, + "lineto": 513, "args": [ - { - "name": "ancestor_out", - "type": "const git_index_entry **", - "comment": "Pointer to store the ancestor entry" - }, - { - "name": "our_out", - "type": "const git_index_entry **", - "comment": "Pointer to store the our entry" - }, - { - "name": "their_out", - "type": "const git_index_entry **", - "comment": "Pointer to store the their entry" - }, { "name": "index", "type": "git_index *", "comment": "an existing index object" }, - { - "name": "path", - "type": "const char *", - "comment": "path to search" - } + { "name": "path", "type": "const char *", "comment": "path to search" }, + { "name": "stage", "type": "int", "comment": "stage to search" } ], - "argline": "const git_index_entry **ancestor_out, const git_index_entry **our_out, const git_index_entry **their_out, git_index *index, const char *path", - "sig": "const git_index_entry **::const git_index_entry **::const git_index_entry **::git_index *::const char *", + "argline": "git_index *index, const char *path, int stage", + "sig": "git_index *::const char *::int", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const git_index_entry *", + "comment": " a pointer to the entry; NULL if it was not found" }, - "description": "

Get the index entries that represent a conflict of a single file.

\n", - "comments": "

The entries are not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "description": "

Get a pointer to one of the entries in the index

\n", + "comments": "

The entry is not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "group": "index", + "examples": { + "ls-files.c": ["ex/v1.9.1/ls-files.html#git_index_get_bypath-4"] + } + }, + "git_index_remove": { + "type": "function", + "file": "git2/index.h", + "line": 523, + "lineto": 523, + "args": [ + { + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + }, + { "name": "path", "type": "const char *", "comment": "path to search" }, + { "name": "stage", "type": "int", "comment": "stage to search" } + ], + "argline": "git_index *index, const char *path, int stage", + "sig": "git_index *::const char *::int", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Remove an entry from the index

\n", + "comments": "", "group": "index" }, - "git_index_conflict_remove": { + "git_index_remove_directory": { "type": "function", - "file": "index.h", - "line": 754, - "lineto": 754, + "file": "git2/index.h", + "line": 533, + "lineto": 534, "args": [ { "name": "index", @@ -8721,878 +9062,1100 @@ "comment": "an existing index object" }, { - "name": "path", + "name": "dir", "type": "const char *", - "comment": "path to remove conflicts for" - } + "comment": "container directory path" + }, + { "name": "stage", "type": "int", "comment": "stage to search" } ], - "argline": "git_index *index, const char *path", - "sig": "git_index *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Removes the index entries that represent a conflict of a single file.

\n", + "argline": "git_index *index, const char *dir, int stage", + "sig": "git_index *::const char *::int", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Remove all entries from the index under a given directory

\n", "comments": "", "group": "index" }, - "git_index_conflict_cleanup": { + "git_index_add": { "type": "function", - "file": "index.h", - "line": 762, - "lineto": 762, + "file": "git2/index.h", + "line": 550, + "lineto": 550, "args": [ { "name": "index", "type": "git_index *", "comment": "an existing index object" + }, + { + "name": "source_entry", + "type": "const git_index_entry *", + "comment": "new entry object" } ], - "argline": "git_index *index", - "sig": "git_index *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Remove all conflicts in the index (entries with a stage greater than 0).

\n", - "comments": "", + "argline": "git_index *index, const git_index_entry *source_entry", + "sig": "git_index *::const git_index_entry *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Add or update an index entry from an in-memory struct

\n", + "comments": "

If a previous index entry exists that has the same path and stage as the given 'source_entry', it will be replaced. Otherwise, the 'source_entry' will be added.

\n\n

A full copy (including the 'path' string) of the given 'source_entry' will be inserted on the index.

\n", "group": "index" }, - "git_index_has_conflicts": { + "git_index_entry_stage": { "type": "function", - "file": "index.h", - "line": 769, - "lineto": 769, + "file": "git2/index.h", + "line": 562, + "lineto": 562, "args": [ { - "name": "index", - "type": "const git_index *", - "comment": null + "name": "entry", + "type": "const git_index_entry *", + "comment": "The entry" } ], - "argline": "const git_index *index", - "sig": "const git_index *", + "argline": "const git_index_entry *entry", + "sig": "const git_index_entry *", + "return": { "type": "int", "comment": " the stage number" }, + "description": "

Return the stage number from a git index entry

\n", + "comments": "

This entry is calculated from the entry's flag attribute like this:

\n\n
(entry->flags & GIT_INDEX_ENTRY_STAGEMASK) >> GIT_INDEX_ENTRY_STAGESHIFT\n
\n", + "group": "index" + }, + "git_index_entry_is_conflict": { + "type": "function", + "file": "git2/index.h", + "line": 571, + "lineto": 571, + "args": [ + { + "name": "entry", + "type": "const git_index_entry *", + "comment": "The entry" + } + ], + "argline": "const git_index_entry *entry", + "sig": "const git_index_entry *", "return": { "type": "int", - "comment": " 1 if at least one conflict is found, 0 otherwise." + "comment": " 1 if the entry is a conflict entry, 0 otherwise" }, - "description": "

Determine if the index contains entries representing file conflicts.

\n", + "description": "

Return whether the given index entry is a conflict (has a high stage\n entry). This is simply shorthand for git_index_entry_stage > 0.

\n", "comments": "", - "group": "index", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_index_has_conflicts-15" - ] - } + "group": "index" }, - "git_index_conflict_iterator_new": { + "git_index_iterator_new": { "type": "function", - "file": "index.h", - "line": 780, - "lineto": 782, + "file": "git2/index.h", + "line": 592, + "lineto": 594, "args": [ { "name": "iterator_out", - "type": "git_index_conflict_iterator **", - "comment": "The newly created conflict iterator" + "type": "git_index_iterator **", + "comment": "The newly created iterator" }, { "name": "index", "type": "git_index *", - "comment": "The index to scan" + "comment": "The index to iterate" } ], - "argline": "git_index_conflict_iterator **iterator_out, git_index *index", - "sig": "git_index_conflict_iterator **::git_index *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create an iterator for the conflicts in the index.

\n", - "comments": "

The index must not be modified while iterating; the results are undefined.

\n", - "group": "index", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_index_conflict_iterator_new-16" - ] - } + "argline": "git_index_iterator **iterator_out, git_index *index", + "sig": "git_index_iterator **::git_index *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create an iterator that will return every entry contained in the\n index at the time of creation. Entries are returned in order,\n sorted by path. This iterator is backed by a snapshot that allows\n callers to modify the index while iterating without affecting the\n iterator.

\n", + "comments": "", + "group": "index" }, - "git_index_conflict_next": { + "git_index_iterator_next": { "type": "function", - "file": "index.h", - "line": 794, - "lineto": 798, + "file": "git2/index.h", + "line": 603, + "lineto": 605, "args": [ { - "name": "ancestor_out", - "type": "const git_index_entry **", - "comment": "Pointer to store the ancestor side of the conflict" - }, - { - "name": "our_out", - "type": "const git_index_entry **", - "comment": "Pointer to store our side of the conflict" - }, - { - "name": "their_out", + "name": "out", "type": "const git_index_entry **", - "comment": "Pointer to store their side of the conflict" + "comment": "Pointer to store the index entry in" }, { "name": "iterator", - "type": "git_index_conflict_iterator *", - "comment": null + "type": "git_index_iterator *", + "comment": "The iterator" } ], - "argline": "const git_index_entry **ancestor_out, const git_index_entry **our_out, const git_index_entry **their_out, git_index_conflict_iterator *iterator", - "sig": "const git_index_entry **::const git_index_entry **::const git_index_entry **::git_index_conflict_iterator *", + "argline": "const git_index_entry **out, git_index_iterator *iterator", + "sig": "const git_index_entry **::git_index_iterator *", "return": { "type": "int", - "comment": " 0 (no error), GIT_ITEROVER (iteration is done) or an error code\n (negative value)" + "comment": " 0, GIT_ITEROVER on iteration completion or an error code" }, - "description": "

Returns the current conflict (ancestor, ours and theirs entry) and\n advance the iterator internally to the next value.

\n", + "description": "

Return the next index entry in-order from the iterator.

\n", "comments": "", - "group": "index", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_index_conflict_next-17" - ] - } + "group": "index" }, - "git_index_conflict_iterator_free": { + "git_index_iterator_free": { "type": "function", - "file": "index.h", - "line": 805, - "lineto": 806, + "file": "git2/index.h", + "line": 612, + "lineto": 612, "args": [ { "name": "iterator", - "type": "git_index_conflict_iterator *", - "comment": "pointer to the iterator" + "type": "git_index_iterator *", + "comment": "The iterator to free" } ], - "argline": "git_index_conflict_iterator *iterator", - "sig": "git_index_conflict_iterator *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Frees a git_index_conflict_iterator.

\n", + "argline": "git_index_iterator *iterator", + "sig": "git_index_iterator *", + "return": { "type": "void", "comment": null }, + "description": "

Free the index iterator

\n", "comments": "", - "group": "index", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_index_conflict_iterator_free-18" - ] - } + "group": "index" }, - "git_indexer_new": { + "git_index_add_bypath": { "type": "function", - "file": "indexer.h", - "line": 30, - "lineto": 36, + "file": "git2/index.h", + "line": 643, + "lineto": 643, "args": [ { - "name": "out", - "type": "git_indexer **", - "comment": "where to store the indexer instance" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, + { "name": "path", "type": "const char *", "comment": "filename to add" } + ], + "argline": "git_index *index, const char *path", + "sig": "git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Add or update an index entry from a file on disk

\n", + "comments": "

The file path must be relative to the repository's working folder and must be readable.

\n\n

This method will fail in bare index instances.

\n\n

This forces the file to be added to the index, not looking at gitignore rules. Those rules can be evaluated through the git_status APIs (in status.h) before calling this.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", + "group": "index" + }, + "git_index_add_from_buffer": { + "type": "function", + "file": "git2/index.h", + "line": 671, + "lineto": 674, + "args": [ { - "name": "path", - "type": "const char *", - "comment": "to the directory where the packfile should be stored" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, { - "name": "mode", - "type": "unsigned int", - "comment": "permissions to use creating packfile or 0 for defaults" + "name": "entry", + "type": "const git_index_entry *", + "comment": "filename to add" }, { - "name": "odb", - "type": "git_odb *", - "comment": "object database from which to read base objects when\n fixing thin packs. Pass NULL if no thin pack is expected (an error\n will be returned if there are bases missing)" + "name": "buffer", + "type": "const void *", + "comment": "data to be written into the blob" }, + { "name": "len", "type": "size_t", "comment": "length of the data" } + ], + "argline": "git_index *index, const git_index_entry *entry, const void *buffer, size_t len", + "sig": "git_index *::const git_index_entry *::const void *::size_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Add or update an index entry from a buffer in memory

\n", + "comments": "

This method will create a blob in the repository that owns the index and then add the index entry to the index. The path of the entry represents the position of the blob relative to the repository's root folder.

\n\n

If a previous index entry exists that has the same path as the given 'entry', it will be replaced. Otherwise, the 'entry' will be added.

\n\n

This forces the file to be added to the index, not looking at gitignore rules. Those rules can be evaluated through the git_status APIs (in status.h) before calling this.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", + "group": "index" + }, + "git_index_remove_bypath": { + "type": "function", + "file": "git2/index.h", + "line": 690, + "lineto": 690, + "args": [ { - "name": "progress_cb", - "type": "git_transfer_progress_cb", - "comment": "function to call with progress information" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, { - "name": "progress_cb_payload", - "type": "void *", - "comment": "payload for the progress callback" + "name": "path", + "type": "const char *", + "comment": "filename to remove" } ], - "argline": "git_indexer **out, const char *path, unsigned int mode, git_odb *odb, git_transfer_progress_cb progress_cb, void *progress_cb_payload", - "sig": "git_indexer **::const char *::unsigned int::git_odb *::git_transfer_progress_cb::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Create a new indexer instance

\n", - "comments": "", - "group": "indexer", - "examples": { - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_indexer_new-1" - ] - } + "argline": "git_index *index, const char *path", + "sig": "git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Remove an index entry corresponding to a file on disk

\n", + "comments": "

The file path must be relative to the repository's working folder. It may exist.

\n\n

If this file currently is the result of a merge conflict, this file will no longer be marked as conflicting. The data about the conflict will be moved to the "resolve undo" (REUC) section.

\n", + "group": "index" }, - "git_indexer_append": { + "git_index_add_all": { "type": "function", - "file": "indexer.h", - "line": 46, - "lineto": 46, + "file": "git2/index.h", + "line": 738, + "lineto": 743, "args": [ { - "name": "idx", - "type": "git_indexer *", - "comment": "the indexer" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, { - "name": "data", - "type": "const void *", - "comment": "the data to add" + "name": "pathspec", + "type": "const git_strarray *", + "comment": "array of path patterns" }, { - "name": "size", - "type": "size_t", - "comment": "the size of the data in bytes" + "name": "flags", + "type": "unsigned int", + "comment": "combination of git_index_add_option_t flags" }, { - "name": "stats", - "type": "git_transfer_progress *", - "comment": "stat storage" + "name": "callback", + "type": "git_index_matched_path_cb", + "comment": "notification callback for each added/updated path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." + }, + { + "name": "payload", + "type": "void *", + "comment": "payload passed through to callback function" } ], - "argline": "git_indexer *idx, const void *data, size_t size, git_transfer_progress *stats", - "sig": "git_indexer *::const void *::size_t::git_transfer_progress *", + "argline": "git_index *index, const git_strarray *pathspec, unsigned int flags, git_index_matched_path_cb callback, void *payload", + "sig": "git_index *::const git_strarray *::unsigned int::git_index_matched_path_cb::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, negative callback return value, or error code" }, - "description": "

Add data to the indexer

\n", - "comments": "", - "group": "indexer", - "examples": { - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_indexer_append-2" - ] - } + "description": "

Add or update index entries matching files in the working directory.

\n", + "comments": "

This method will fail in bare index instances.

\n\n

The pathspec is a list of file names or shell glob patterns that will be matched against files in the repository's working directory. Each file that matches will be added to the index (either updating an existing entry or adding a new entry). You can disable glob expansion and force exact matching with the GIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH flag.

\n\n

Files that are ignored will be skipped (unlike git_index_add_bypath). If a file is already tracked in the index, then it will be updated even if it is ignored. Pass the GIT_INDEX_ADD_FORCE flag to skip the checking of ignore rules.

\n\n

To emulate git add -A and generate an error if the pathspec contains the exact path of an ignored file (when not using FORCE), add the GIT_INDEX_ADD_CHECK_PATHSPEC flag. This checks that each entry in the pathspec that is an exact match to a filename on disk is either not ignored or already in the index. If this check fails, the function will return GIT_EINVALIDSPEC.

\n\n

To emulate git add -A with the "dry-run" option, just use a callback function that always returns a positive value. See below for details.

\n\n

If any files are currently the result of a merge conflict, those files will no longer be marked as conflicting. The data about the conflicts will be moved to the "resolve undo" (REUC) section.

\n\n

If you provide a callback function, it will be invoked on each matching item in the working directory immediately before it is added to / updated in the index. Returning zero will add the item to the index, greater than zero will skip the item, and less than zero will abort the scan and return that value to the caller.

\n", + "group": "index", + "examples": { "add.c": ["ex/v1.9.1/add.html#git_index_add_all-3"] } }, - "git_indexer_commit": { + "git_index_remove_all": { "type": "function", - "file": "indexer.h", - "line": 55, - "lineto": 55, + "file": "git2/index.h", + "line": 760, + "lineto": 764, "args": [ { - "name": "idx", - "type": "git_indexer *", - "comment": "the indexer" + "name": "index", + "type": "git_index *", + "comment": "An existing index object" }, { - "name": "stats", - "type": "git_transfer_progress *", - "comment": null + "name": "pathspec", + "type": "const git_strarray *", + "comment": "array of path patterns" + }, + { + "name": "callback", + "type": "git_index_matched_path_cb", + "comment": "notification callback for each removed path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." + }, + { + "name": "payload", + "type": "void *", + "comment": "payload passed through to callback function" } ], - "argline": "git_indexer *idx, git_transfer_progress *stats", - "sig": "git_indexer *::git_transfer_progress *", + "argline": "git_index *index, const git_strarray *pathspec, git_index_matched_path_cb callback, void *payload", + "sig": "git_index *::const git_strarray *::git_index_matched_path_cb::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, negative callback return value, or error code" }, - "description": "

Finalize the pack and index

\n", - "comments": "

Resolve any pending deltas and write out the index file

\n", - "group": "indexer", - "examples": { - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_indexer_commit-3" - ] - } + "description": "

Remove all matching index entries.

\n", + "comments": "

If you provide a callback function, it will be invoked on each matching item in the index immediately before it is removed. Return 0 to remove the item, > 0 to skip the item, and < 0 to abort the scan.

\n", + "group": "index" }, - "git_indexer_hash": { + "git_index_update_all": { "type": "function", - "file": "indexer.h", - "line": 65, - "lineto": 65, + "file": "git2/index.h", + "line": 789, + "lineto": 793, "args": [ { - "name": "idx", - "type": "const git_indexer *", - "comment": "the indexer instance" + "name": "index", + "type": "git_index *", + "comment": "An existing index object" + }, + { + "name": "pathspec", + "type": "const git_strarray *", + "comment": "array of path patterns" + }, + { + "name": "callback", + "type": "git_index_matched_path_cb", + "comment": "notification callback for each updated path (also\n gets index of matching pathspec entry); can be NULL;\n return 0 to add, >0 to skip, \n<\n0 to abort scan." + }, + { + "name": "payload", + "type": "void *", + "comment": "payload passed through to callback function" } ], - "argline": "const git_indexer *idx", - "sig": "const git_indexer *", + "argline": "git_index *index, const git_strarray *pathspec, git_index_matched_path_cb callback, void *payload", + "sig": "git_index *::const git_strarray *::git_index_matched_path_cb::void *", "return": { - "type": "const git_oid *", - "comment": null + "type": "int", + "comment": " 0 on success, negative callback return value, or error code" }, - "description": "

Get the packfile's hash

\n", - "comments": "

A packfile's name is derived from the sorted hashing of all object names. This is only correct after the index has been finalized.

\n", - "group": "indexer", - "examples": { - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_indexer_hash-4" - ] - } + "description": "

Update all index entries to match the working directory

\n", + "comments": "

This method will fail in bare index instances.

\n\n

This scans the existing index entries and synchronizes them with the working directory, deleting them if the corresponding working directory file no longer exists otherwise updating the information (including adding the latest version of file to the ODB if needed).

\n\n

If you provide a callback function, it will be invoked on each matching item in the index immediately before it is updated (either refreshed or removed depending on working directory state). Return 0 to proceed with updating the item, > 0 to skip the item, and < 0 to abort the scan.

\n", + "group": "index", + "examples": { "add.c": ["ex/v1.9.1/add.html#git_index_update_all-4"] } }, - "git_indexer_free": { + "git_index_find": { "type": "function", - "file": "indexer.h", - "line": 72, - "lineto": 72, + "file": "git2/index.h", + "line": 804, + "lineto": 804, "args": [ { - "name": "idx", - "type": "git_indexer *", - "comment": "the indexer to free" - } + "name": "at_pos", + "type": "size_t *", + "comment": "the address to which the position of the index entry is written (optional)" + }, + { + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + }, + { "name": "path", "type": "const char *", "comment": "path to search" } ], - "argline": "git_indexer *idx", - "sig": "git_indexer *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free the indexer and its resources

\n", + "argline": "size_t *at_pos, git_index *index, const char *path", + "sig": "size_t *::git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Find the first position of any entries which point to given\n path in the Git index.

\n", "comments": "", - "group": "indexer", - "examples": { - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_indexer_free-5" - ] - } + "group": "index" }, - "git_merge_file_init_input": { + "git_index_find_prefix": { "type": "function", - "file": "merge.h", - "line": 60, - "lineto": 62, + "file": "git2/index.h", + "line": 815, + "lineto": 815, "args": [ { - "name": "opts", - "type": "git_merge_file_input *", - "comment": "the `git_merge_file_input` instance to initialize." + "name": "at_pos", + "type": "size_t *", + "comment": "the address to which the position of the index entry is written (optional)" }, { - "name": "version", - "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_MERGE_FILE_INPUT_VERSION` here." + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + }, + { + "name": "prefix", + "type": "const char *", + "comment": "the prefix to search for" } ], - "argline": "git_merge_file_input *opts, unsigned int version", - "sig": "git_merge_file_input *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_merge_file_input with default values. Equivalent to\n creating an instance with GIT_MERGE_FILE_INPUT_INIT.

\n", + "argline": "size_t *at_pos, git_index *index, const char *prefix", + "sig": "size_t *::git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Find the first position of any entries matching a prefix. To find the first position\n of a path inside a given folder, suffix the prefix with a '/'.

\n", "comments": "", - "group": "merge" + "group": "index" }, - "git_merge_file_init_options": { + "git_index_conflict_add": { "type": "function", - "file": "merge.h", - "line": 214, - "lineto": 216, + "file": "git2/index.h", + "line": 840, + "lineto": 844, "args": [ { - "name": "opts", - "type": "git_merge_file_options *", - "comment": "the `git_merge_file_options` instance to initialize." + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, { - "name": "version", - "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_MERGE_FILE_OPTIONS_VERSION` here." - } - ], - "argline": "git_merge_file_options *opts, unsigned int version", - "sig": "git_merge_file_options *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_merge_file_options with default values. Equivalent to\n creating an instance with GIT_MERGE_FILE_OPTIONS_INIT.

\n", - "comments": "", - "group": "merge" - }, - "git_merge_init_options": { - "type": "function", - "file": "merge.h", - "line": 311, - "lineto": 313, - "args": [ + "name": "ancestor_entry", + "type": "const git_index_entry *", + "comment": "the entry data for the ancestor of the conflict" + }, { - "name": "opts", - "type": "git_merge_options *", - "comment": "the `git_merge_options` instance to initialize." + "name": "our_entry", + "type": "const git_index_entry *", + "comment": "the entry data for our side of the merge conflict" }, { - "name": "version", - "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_MERGE_OPTIONS_VERSION` here." + "name": "their_entry", + "type": "const git_index_entry *", + "comment": "the entry data for their side of the merge conflict" } ], - "argline": "git_merge_options *opts, unsigned int version", - "sig": "git_merge_options *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_merge_options with default values. Equivalent to\n creating an instance with GIT_MERGE_OPTIONS_INIT.

\n", - "comments": "", - "group": "merge" + "argline": "git_index *index, const git_index_entry *ancestor_entry, const git_index_entry *our_entry, const git_index_entry *their_entry", + "sig": "git_index *::const git_index_entry *::const git_index_entry *::const git_index_entry *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Add or update index entries to represent a conflict. Any staged\n entries that exist at the given paths will be removed.

\n", + "comments": "

The entries are the entries from the tree included in the merge. Any entry may be null to indicate that that file was not present in the trees during the merge. For example, ancestor_entry may be NULL to indicate that a file was added in both branches and must be resolved.

\n", + "group": "index" }, - "git_merge_analysis": { + "git_index_conflict_get": { "type": "function", - "file": "merge.h", - "line": 382, - "lineto": 387, + "file": "git2/index.h", + "line": 860, + "lineto": 865, "args": [ { - "name": "analysis_out", - "type": "git_merge_analysis_t *", - "comment": "analysis enumeration that the result is written into" + "name": "ancestor_out", + "type": "const git_index_entry **", + "comment": "Pointer to store the ancestor entry" }, { - "name": "preference_out", - "type": "git_merge_preference_t *", - "comment": null + "name": "our_out", + "type": "const git_index_entry **", + "comment": "Pointer to store the our entry" }, { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to merge" + "name": "their_out", + "type": "const git_index_entry **", + "comment": "Pointer to store the their entry" }, { - "name": "their_heads", - "type": "const git_annotated_commit **", - "comment": "the heads to merge into" + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + }, + { "name": "path", "type": "const char *", "comment": "path to search" } + ], + "argline": "const git_index_entry **ancestor_out, const git_index_entry **our_out, const git_index_entry **their_out, git_index *index, const char *path", + "sig": "const git_index_entry **::const git_index_entry **::const git_index_entry **::git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Get the index entries that represent a conflict of a single file.

\n", + "comments": "

The entries are not modifiable and should not be freed. Because the git_index_entry struct is a publicly defined struct, you should be able to make your own permanent copy of the data if necessary.

\n", + "group": "index" + }, + "git_index_conflict_remove": { + "type": "function", + "file": "git2/index.h", + "line": 874, + "lineto": 874, + "args": [ + { + "name": "index", + "type": "git_index *", + "comment": "an existing index object" }, { - "name": "their_heads_len", - "type": "size_t", - "comment": "the number of heads to merge" + "name": "path", + "type": "const char *", + "comment": "path to remove conflicts for" } ], - "argline": "git_merge_analysis_t *analysis_out, git_merge_preference_t *preference_out, git_repository *repo, const git_annotated_commit **their_heads, size_t their_heads_len", - "sig": "git_merge_analysis_t *::git_merge_preference_t *::git_repository *::const git_annotated_commit **::size_t", + "argline": "git_index *index, const char *path", + "sig": "git_index *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Removes the index entries that represent a conflict of a single file.

\n", + "comments": "", + "group": "index" + }, + "git_index_conflict_cleanup": { + "type": "function", + "file": "git2/index.h", + "line": 882, + "lineto": 882, + "args": [ + { + "name": "index", + "type": "git_index *", + "comment": "an existing index object" + } + ], + "argline": "git_index *index", + "sig": "git_index *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Remove all conflicts in the index (entries with a stage greater than 0).

\n", + "comments": "", + "group": "index" + }, + "git_index_has_conflicts": { + "type": "function", + "file": "git2/index.h", + "line": 890, + "lineto": 890, + "args": [ + { + "name": "index", + "type": "const git_index *", + "comment": "An existing index object." + } + ], + "argline": "const git_index *index", + "sig": "const git_index *", "return": { "type": "int", - "comment": " 0 on success or error code" + "comment": " 1 if at least one conflict is found, 0 otherwise." }, - "description": "

Analyzes the given branch(es) and determines the opportunities for\n merging them into the HEAD of the repository.

\n", + "description": "

Determine if the index contains entries representing file conflicts.

\n", "comments": "", - "group": "merge", + "group": "index", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_merge_analysis-19" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_index_has_conflicts-11"] } }, - "git_merge_base": { + "git_index_conflict_iterator_new": { "type": "function", - "file": "merge.h", - "line": 398, - "lineto": 402, + "file": "git2/index.h", + "line": 901, + "lineto": 903, "args": [ { - "name": "out", - "type": "git_oid *", - "comment": "the OID of a merge base between 'one' and 'two'" + "name": "iterator_out", + "type": "git_index_conflict_iterator **", + "comment": "The newly created conflict iterator" }, { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" + "name": "index", + "type": "git_index *", + "comment": "The index to scan" + } + ], + "argline": "git_index_conflict_iterator **iterator_out, git_index *index", + "sig": "git_index_conflict_iterator **::git_index *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create an iterator for the conflicts in the index.

\n", + "comments": "

The index must not be modified while iterating; the results are undefined.

\n", + "group": "index", + "examples": { + "merge.c": ["ex/v1.9.1/merge.html#git_index_conflict_iterator_new-12"] + } + }, + "git_index_conflict_next": { + "type": "function", + "file": "git2/index.h", + "line": 916, + "lineto": 920, + "args": [ + { + "name": "ancestor_out", + "type": "const git_index_entry **", + "comment": "Pointer to store the ancestor side of the conflict" }, { - "name": "one", - "type": "const git_oid *", - "comment": "one of the commits" + "name": "our_out", + "type": "const git_index_entry **", + "comment": "Pointer to store our side of the conflict" }, { - "name": "two", - "type": "const git_oid *", - "comment": "the other commit" + "name": "their_out", + "type": "const git_index_entry **", + "comment": "Pointer to store their side of the conflict" + }, + { + "name": "iterator", + "type": "git_index_conflict_iterator *", + "comment": "The conflict iterator." } ], - "argline": "git_oid *out, git_repository *repo, const git_oid *one, const git_oid *two", - "sig": "git_oid *::git_repository *::const git_oid *::const git_oid *", + "argline": "const git_index_entry **ancestor_out, const git_index_entry **our_out, const git_index_entry **their_out, git_index_conflict_iterator *iterator", + "sig": "const git_index_entry **::const git_index_entry **::const git_index_entry **::git_index_conflict_iterator *", "return": { "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if not found or error code" + "comment": " 0 (no error), GIT_ITEROVER (iteration is done) or an error code\n (negative value)" }, - "description": "

Find a merge base between two commits

\n", + "description": "

Returns the current conflict (ancestor, ours and theirs entry) and\n advance the iterator internally to the next value.

\n", "comments": "", - "group": "merge", + "group": "index", "examples": { - "log.c": [ - "ex/HEAD/log.html#git_merge_base-33" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_merge_base-3" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_index_conflict_next-13"] } }, - "git_merge_bases": { + "git_index_conflict_iterator_free": { "type": "function", - "file": "merge.h", - "line": 413, - "lineto": 417, + "file": "git2/index.h", + "line": 927, + "lineto": 928, "args": [ { - "name": "out", - "type": "git_oidarray *", - "comment": "array in which to store the resulting ids" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" - }, + "name": "iterator", + "type": "git_index_conflict_iterator *", + "comment": "pointer to the iterator" + } + ], + "argline": "git_index_conflict_iterator *iterator", + "sig": "git_index_conflict_iterator *", + "return": { "type": "void", "comment": null }, + "description": "

Frees a git_index_conflict_iterator.

\n", + "comments": "", + "group": "index", + "examples": { + "merge.c": ["ex/v1.9.1/merge.html#git_index_conflict_iterator_free-14"] + } + }, + "git_indexer_options_init": { + "type": "function", + "file": "git2/indexer.h", + "line": 116, + "lineto": 118, + "args": [ { - "name": "one", - "type": "const git_oid *", - "comment": "one of the commits" + "name": "opts", + "type": "git_indexer_options *", + "comment": "the `git_indexer_options` struct to initialize." }, { - "name": "two", - "type": "const git_oid *", - "comment": "the other commit" + "name": "version", + "type": "unsigned int", + "comment": "Version of struct; pass `GIT_INDEXER_OPTIONS_VERSION`" } ], - "argline": "git_oidarray *out, git_repository *repo, const git_oid *one, const git_oid *two", - "sig": "git_oidarray *::git_repository *::const git_oid *::const git_oid *", + "argline": "git_indexer_options *opts, unsigned int version", + "sig": "git_indexer_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if not found or error code" + "comment": " Zero on success; -1 on failure." }, - "description": "

Find merge bases between two commits

\n", + "description": "

Initializes a git_indexer_options with default values. Equivalent to\n creating an instance with GIT_INDEXER_OPTIONS_INIT.

\n", "comments": "", - "group": "merge" + "group": "indexer" }, - "git_merge_base_many": { + "git_indexer_new": { "type": "function", - "file": "merge.h", - "line": 428, - "lineto": 432, + "file": "git2/indexer.h", + "line": 147, + "lineto": 152, "args": [ { "name": "out", - "type": "git_oid *", - "comment": "the OID of a merge base considering all the commits" + "type": "git_indexer **", + "comment": "where to store the indexer instance" }, { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" + "name": "path", + "type": "const char *", + "comment": "to the directory where the packfile should be stored" }, { - "name": "length", - "type": "size_t", - "comment": "The number of commits in the provided `input_array`" + "name": "mode", + "type": "unsigned int", + "comment": "permissions to use creating packfile or 0 for defaults" }, { - "name": "input_array", - "type": "const git_oid []", - "comment": "oids of the commits" + "name": "odb", + "type": "git_odb *", + "comment": "object database from which to read base objects when\n fixing thin packs. Pass NULL if no thin pack is expected (an error\n will be returned if there are bases missing)" + }, + { + "name": "opts", + "type": "git_indexer_options *", + "comment": "Optional structure containing additional options. See\n `git_indexer_options` above." } ], - "argline": "git_oid *out, git_repository *repo, size_t length, const git_oid [] input_array", - "sig": "git_oid *::git_repository *::size_t::const git_oid []", - "return": { - "type": "int", - "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." - }, - "description": "

Find a merge base given a list of commits

\n", + "argline": "git_indexer **out, const char *path, unsigned int mode, git_odb *odb, git_indexer_options *opts", + "sig": "git_indexer **::const char *::unsigned int::git_odb *::git_indexer_options *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Create a new indexer instance

\n", "comments": "", - "group": "merge" + "group": "indexer" }, - "git_merge_bases_many": { + "git_indexer_append": { "type": "function", - "file": "merge.h", - "line": 443, - "lineto": 447, + "file": "git2/indexer.h", + "line": 164, + "lineto": 164, "args": [ + { "name": "idx", "type": "git_indexer *", "comment": "the indexer" }, { - "name": "out", - "type": "git_oidarray *", - "comment": "array in which to store the resulting ids" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" + "name": "data", + "type": "const void *", + "comment": "the data to add" }, { - "name": "length", + "name": "size", "type": "size_t", - "comment": "The number of commits in the provided `input_array`" + "comment": "the size of the data in bytes" }, { - "name": "input_array", - "type": "const git_oid []", - "comment": "oids of the commits" + "name": "stats", + "type": "git_indexer_progress *", + "comment": "stat storage" } ], - "argline": "git_oidarray *out, git_repository *repo, size_t length, const git_oid [] input_array", - "sig": "git_oidarray *::git_repository *::size_t::const git_oid []", - "return": { - "type": "int", - "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." - }, - "description": "

Find all merge bases given a list of commits

\n", + "argline": "git_indexer *idx, const void *data, size_t size, git_indexer_progress *stats", + "sig": "git_indexer *::const void *::size_t::git_indexer_progress *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Add data to the indexer

\n", "comments": "", - "group": "merge" + "group": "indexer" }, - "git_merge_base_octopus": { + "git_indexer_commit": { "type": "function", - "file": "merge.h", - "line": 458, - "lineto": 462, + "file": "git2/indexer.h", + "line": 175, + "lineto": 175, "args": [ + { "name": "idx", "type": "git_indexer *", "comment": "the indexer" }, { - "name": "out", - "type": "git_oid *", - "comment": "the OID of a merge base considering all the commits" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository where the commits exist" - }, - { - "name": "length", - "type": "size_t", - "comment": "The number of commits in the provided `input_array`" - }, + "name": "stats", + "type": "git_indexer_progress *", + "comment": "Stat storage." + } + ], + "argline": "git_indexer *idx, git_indexer_progress *stats", + "sig": "git_indexer *::git_indexer_progress *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Finalize the pack and index

\n", + "comments": "

Resolve any pending deltas and write out the index file

\n", + "group": "indexer" + }, + "git_indexer_hash": { + "type": "function", + "file": "git2/indexer.h", + "line": 188, + "lineto": 188, + "args": [ { - "name": "input_array", - "type": "const git_oid []", - "comment": "oids of the commits" + "name": "idx", + "type": "const git_indexer *", + "comment": "the indexer instance" } ], - "argline": "git_oid *out, git_repository *repo, size_t length, const git_oid [] input_array", - "sig": "git_oid *::git_repository *::size_t::const git_oid []", + "argline": "const git_indexer *idx", + "sig": "const git_indexer *", "return": { - "type": "int", - "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." + "type": "const git_oid *", + "comment": " the packfile's hash" }, - "description": "

Find a merge base in preparation for an octopus merge

\n", - "comments": "", - "group": "merge" + "description": "

Get the packfile's hash

\n", + "comments": "

A packfile's name is derived from the sorted hashing of all object names. This is only correct after the index has been finalized.

\n", + "group": "indexer" }, - "git_merge_file": { + "git_indexer_name": { "type": "function", - "file": "merge.h", - "line": 480, - "lineto": 485, + "file": "git2/indexer.h", + "line": 200, + "lineto": 200, "args": [ { - "name": "out", - "type": "git_merge_file_result *", - "comment": "The git_merge_file_result to be filled in" - }, - { - "name": "ancestor", - "type": "const git_merge_file_input *", - "comment": "The contents of the ancestor file" - }, - { - "name": "ours", - "type": "const git_merge_file_input *", - "comment": "The contents of the file in \"our\" side" - }, - { - "name": "theirs", - "type": "const git_merge_file_input *", - "comment": "The contents of the file in \"their\" side" - }, - { - "name": "opts", - "type": "const git_merge_file_options *", - "comment": "The merge file options or `NULL` for defaults" + "name": "idx", + "type": "const git_indexer *", + "comment": "the indexer instance" } ], - "argline": "git_merge_file_result *out, const git_merge_file_input *ancestor, const git_merge_file_input *ours, const git_merge_file_input *theirs, const git_merge_file_options *opts", - "sig": "git_merge_file_result *::const git_merge_file_input *::const git_merge_file_input *::const git_merge_file_input *::const git_merge_file_options *", + "argline": "const git_indexer *idx", + "sig": "const git_indexer *", "return": { - "type": "int", - "comment": " 0 on success or error code" + "type": "const char *", + "comment": " a NUL terminated string for the packfile name" }, - "description": "

Merge two files as they exist in the in-memory data structures, using\n the given common ancestor as the baseline, producing a\n git_merge_file_result that reflects the merge result. The\n git_merge_file_result must be freed with git_merge_file_result_free.

\n", - "comments": "

Note that this function does not reference a repository and any configuration must be passed as git_merge_file_options.

\n", - "group": "merge" + "description": "

Get the unique name for the resulting packfile.

\n", + "comments": "

The packfile's name is derived from the packfile's content. This is only correct after the index has been finalized.

\n", + "group": "indexer" }, - "git_merge_file_from_index": { + "git_indexer_free": { "type": "function", - "file": "merge.h", - "line": 501, - "lineto": 507, + "file": "git2/indexer.h", + "line": 207, + "lineto": 207, + "args": [ + { + "name": "idx", + "type": "git_indexer *", + "comment": "the indexer to free" + } + ], + "argline": "git_indexer *idx", + "sig": "git_indexer *", + "return": { "type": "void", "comment": null }, + "description": "

Free the indexer and its resources

\n", + "comments": "", + "group": "indexer" + }, + "git_mailmap_new": { + "type": "function", + "file": "git2/mailmap.h", + "line": 37, + "lineto": 37, "args": [ { "name": "out", - "type": "git_merge_file_result *", - "comment": "The git_merge_file_result to be filled in" - }, + "type": "git_mailmap **", + "comment": "pointer to store the new mailmap" + } + ], + "argline": "git_mailmap **out", + "sig": "git_mailmap **", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Allocate a new mailmap object.

\n", + "comments": "

This object is empty, so you'll have to add a mailmap file before you can do anything with it. The mailmap must be freed with 'git_mailmap_free'.

\n", + "group": "mailmap" + }, + "git_mailmap_free": { + "type": "function", + "file": "git2/mailmap.h", + "line": 44, + "lineto": 44, + "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "The repository" + "name": "mm", + "type": "git_mailmap *", + "comment": "the mailmap to free" + } + ], + "argline": "git_mailmap *mm", + "sig": "git_mailmap *", + "return": { "type": "void", "comment": null }, + "description": "

Free the mailmap and its associated memory.

\n", + "comments": "", + "group": "mailmap" + }, + "git_mailmap_add_entry": { + "type": "function", + "file": "git2/mailmap.h", + "line": 57, + "lineto": 59, + "args": [ + { + "name": "mm", + "type": "git_mailmap *", + "comment": "mailmap to add the entry to" }, { - "name": "ancestor", - "type": "const git_index_entry *", - "comment": "The index entry for the ancestor file (stage level 1)" + "name": "real_name", + "type": "const char *", + "comment": "the real name to use, or NULL" }, { - "name": "ours", - "type": "const git_index_entry *", - "comment": "The index entry for our file (stage level 2)" + "name": "real_email", + "type": "const char *", + "comment": "the real email to use, or NULL" }, { - "name": "theirs", - "type": "const git_index_entry *", - "comment": "The index entry for their file (stage level 3)" + "name": "replace_name", + "type": "const char *", + "comment": "the name to replace, or NULL" }, { - "name": "opts", - "type": "const git_merge_file_options *", - "comment": "The merge file options or NULL" + "name": "replace_email", + "type": "const char *", + "comment": "the email to replace" } ], - "argline": "git_merge_file_result *out, git_repository *repo, const git_index_entry *ancestor, const git_index_entry *ours, const git_index_entry *theirs, const git_merge_file_options *opts", - "sig": "git_merge_file_result *::git_repository *::const git_index_entry *::const git_index_entry *::const git_index_entry *::const git_merge_file_options *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Merge two files as they exist in the index, using the given common\n ancestor as the baseline, producing a git_merge_file_result that\n reflects the merge result. The git_merge_file_result must be freed with\n git_merge_file_result_free.

\n", + "argline": "git_mailmap *mm, const char *real_name, const char *real_email, const char *replace_name, const char *replace_email", + "sig": "git_mailmap *::const char *::const char *::const char *::const char *", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Add a single entry to the given mailmap object. If the entry already exists,\n it will be replaced with the new entry.

\n", "comments": "", - "group": "merge" + "group": "mailmap" }, - "git_merge_file_result_free": { + "git_mailmap_from_buffer": { "type": "function", - "file": "merge.h", - "line": 514, - "lineto": 514, + "file": "git2/mailmap.h", + "line": 69, + "lineto": 70, "args": [ { - "name": "result", - "type": "git_merge_file_result *", - "comment": "The result to free or `NULL`" + "name": "out", + "type": "git_mailmap **", + "comment": "pointer to store the new mailmap" + }, + { + "name": "buf", + "type": "const char *", + "comment": "buffer to parse the mailmap from" + }, + { + "name": "len", + "type": "size_t", + "comment": "the length of the input buffer" } ], - "argline": "git_merge_file_result *result", - "sig": "git_merge_file_result *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Frees a git_merge_file_result.

\n", + "argline": "git_mailmap **out, const char *buf, size_t len", + "sig": "git_mailmap **::const char *::size_t", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Create a new mailmap instance containing a single mailmap file

\n", "comments": "", - "group": "merge" + "group": "mailmap" }, - "git_merge_trees": { + "git_mailmap_from_repository": { "type": "function", - "file": "merge.h", - "line": 532, - "lineto": 538, + "file": "git2/mailmap.h", + "line": 86, + "lineto": 87, "args": [ { "name": "out", - "type": "git_index **", - "comment": "pointer to store the index result in" + "type": "git_mailmap **", + "comment": "pointer to store the new mailmap" }, { "name": "repo", "type": "git_repository *", - "comment": "repository that contains the given trees" + "comment": "repository to load mailmap information from" + } + ], + "argline": "git_mailmap **out, git_repository *repo", + "sig": "git_mailmap **::git_repository *", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Create a new mailmap instance from a repository, loading mailmap files based\n on the repository's configuration.

\n", + "comments": "

Mailmaps are loaded in the following order: 1. '.mailmap' in the root of the repository's working directory, if present. 2. The blob object identified by the 'mailmap.blob' config entry, if set. [NOTE: 'mailmap.blob' defaults to 'HEAD:.mailmap' in bare repositories] 3. The path in the 'mailmap.file' config entry, if set.

\n", + "group": "mailmap" + }, + "git_mailmap_resolve": { + "type": "function", + "file": "git2/mailmap.h", + "line": 101, + "lineto": 103, + "args": [ + { + "name": "real_name", + "type": "const char **", + "comment": "pointer to store the real name" }, { - "name": "ancestor_tree", - "type": "const git_tree *", - "comment": "the common ancestor between the trees (or null if none)" + "name": "real_email", + "type": "const char **", + "comment": "pointer to store the real email" }, { - "name": "our_tree", - "type": "const git_tree *", - "comment": "the tree that reflects the destination tree" + "name": "mm", + "type": "const git_mailmap *", + "comment": "the mailmap to perform a lookup with (may be NULL)" }, { - "name": "their_tree", - "type": "const git_tree *", - "comment": "the tree to merge in to `our_tree`" + "name": "name", + "type": "const char *", + "comment": "the name to look up" }, { - "name": "opts", - "type": "const git_merge_options *", - "comment": "the merge tree options (or null for defaults)" + "name": "email", + "type": "const char *", + "comment": "the email to look up" } ], - "argline": "git_index **out, git_repository *repo, const git_tree *ancestor_tree, const git_tree *our_tree, const git_tree *their_tree, const git_merge_options *opts", - "sig": "git_index **::git_repository *::const git_tree *::const git_tree *::const git_tree *::const git_merge_options *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Merge two trees, producing a git_index that reflects the result of\n the merge. The index may be written as-is to the working directory\n or checked out. If the index is to be converted to a tree, the caller\n should resolve any conflicts that arose as part of the merge.

\n", - "comments": "

The returned index must be freed explicitly with git_index_free.

\n", - "group": "merge" + "argline": "const char **real_name, const char **real_email, const git_mailmap *mm, const char *name, const char *email", + "sig": "const char **::const char **::const git_mailmap *::const char *::const char *", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Resolve a name and email to the corresponding real name and email.

\n", + "comments": "

The lifetime of the strings are tied to mm, name, and email parameters.

\n", + "group": "mailmap" }, - "git_merge_commits": { + "git_mailmap_resolve_signature": { "type": "function", - "file": "merge.h", - "line": 555, - "lineto": 560, + "file": "git2/mailmap.h", + "line": 115, + "lineto": 116, "args": [ { "name": "out", - "type": "git_index **", - "comment": "pointer to store the index result in" + "type": "git_signature **", + "comment": "new signature" }, { - "name": "repo", - "type": "git_repository *", - "comment": "repository that contains the given trees" + "name": "mm", + "type": "const git_mailmap *", + "comment": "mailmap to resolve with" }, { - "name": "our_commit", - "type": "const git_commit *", - "comment": "the commit that reflects the destination tree" + "name": "sig", + "type": "const git_signature *", + "comment": "signature to resolve" + } + ], + "argline": "git_signature **out, const git_mailmap *mm, const git_signature *sig", + "sig": "git_signature **::const git_mailmap *::const git_signature *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Resolve a signature to use real names and emails with a mailmap.

\n", + "comments": "

Call git_signature_free() to free the data.

\n", + "group": "mailmap" + }, + "git_merge_file_input_init": { + "type": "function", + "file": "git2/merge.h", + "line": 66, + "lineto": 68, + "args": [ + { + "name": "opts", + "type": "git_merge_file_input *", + "comment": "the `git_merge_file_input` instance to initialize." }, { - "name": "their_commit", - "type": "const git_commit *", - "comment": "the commit to merge in to `our_commit`" + "name": "version", + "type": "unsigned int", + "comment": "the version of the struct; you should pass\n `GIT_MERGE_FILE_INPUT_VERSION` here." + } + ], + "argline": "git_merge_file_input *opts, unsigned int version", + "sig": "git_merge_file_input *::unsigned int", + "return": { + "type": "int", + "comment": " Zero on success; -1 on failure." + }, + "description": "

Initializes a git_merge_file_input with default values. Equivalent to\n creating an instance with GIT_MERGE_FILE_INPUT_INIT.

\n", + "comments": "", + "group": "merge" + }, + "git_merge_file_options_init": { + "type": "function", + "file": "git2/merge.h", + "line": 243, + "lineto": 243, + "args": [ + { + "name": "opts", + "type": "git_merge_file_options *", + "comment": "The `git_merge_file_options` struct to initialize." }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_MERGE_FILE_OPTIONS_VERSION`." + } + ], + "argline": "git_merge_file_options *opts, unsigned int version", + "sig": "git_merge_file_options *::unsigned int", + "return": { + "type": "int", + "comment": " Zero on success; -1 on failure." + }, + "description": "

Initialize git_merge_file_options structure

\n", + "comments": "

Initializes a git_merge_file_options with default values. Equivalent to creating an instance with GIT_MERGE_FILE_OPTIONS_INIT.

\n", + "group": "merge" + }, + "git_merge_options_init": { + "type": "function", + "file": "git2/merge.h", + "line": 342, + "lineto": 342, + "args": [ { "name": "opts", - "type": "const git_merge_options *", - "comment": "the merge tree options (or null for defaults)" + "type": "git_merge_options *", + "comment": "The `git_merge_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_MERGE_OPTIONS_VERSION`." } ], - "argline": "git_index **out, git_repository *repo, const git_commit *our_commit, const git_commit *their_commit, const git_merge_options *opts", - "sig": "git_index **::git_repository *::const git_commit *::const git_commit *::const git_merge_options *", + "argline": "git_merge_options *opts, unsigned int version", + "sig": "git_merge_options *::unsigned int", "return": { "type": "int", - "comment": " 0 on success or error code" + "comment": " Zero on success; -1 on failure." }, - "description": "

Merge two commits, producing a git_index that reflects the result of\n the merge. The index may be written as-is to the working directory\n or checked out. If the index is to be converted to a tree, the caller\n should resolve any conflicts that arose as part of the merge.

\n", - "comments": "

The returned index must be freed explicitly with git_index_free.

\n", + "description": "

Initialize git_merge_options structure

\n", + "comments": "

Initializes a git_merge_options with default values. Equivalent to creating an instance with GIT_MERGE_OPTIONS_INIT.

\n", "group": "merge" }, - "git_merge": { + "git_merge_analysis": { "type": "function", - "file": "merge.h", - "line": 580, - "lineto": 585, + "file": "git2/merge.h", + "line": 412, + "lineto": 417, "args": [ + { + "name": "analysis_out", + "type": "git_merge_analysis_t *", + "comment": "analysis enumeration that the result is written into" + }, + { + "name": "preference_out", + "type": "git_merge_preference_t *", + "comment": "One of the `git_merge_preference_t` flag." + }, { "name": "repo", "type": "git_repository *", @@ -9607,666 +10170,610 @@ "name": "their_heads_len", "type": "size_t", "comment": "the number of heads to merge" - }, - { - "name": "merge_opts", - "type": "const git_merge_options *", - "comment": "merge options" - }, - { - "name": "checkout_opts", - "type": "const git_checkout_options *", - "comment": "checkout options" } ], - "argline": "git_repository *repo, const git_annotated_commit **their_heads, size_t their_heads_len, const git_merge_options *merge_opts, const git_checkout_options *checkout_opts", - "sig": "git_repository *::const git_annotated_commit **::size_t::const git_merge_options *::const git_checkout_options *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Merges the given commit(s) into HEAD, writing the results into the working\n directory. Any changes are staged for commit and any conflicts are written\n to the index. Callers should inspect the repository's index after this\n completes, resolve any conflicts and prepare a commit.

\n", - "comments": "

For compatibility with git, the repository is put into a merging state. Once the commit is done (or if the uses wishes to abort), you should clear this state by calling git_repository_state_cleanup().

\n", + "argline": "git_merge_analysis_t *analysis_out, git_merge_preference_t *preference_out, git_repository *repo, const git_annotated_commit **their_heads, size_t their_heads_len", + "sig": "git_merge_analysis_t *::git_merge_preference_t *::git_repository *::const git_annotated_commit **::size_t", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Analyzes the given branch(es) and determines the opportunities for\n merging them into the HEAD of the repository.

\n", + "comments": "", "group": "merge", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_merge-20" - ] - } + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_merge_analysis-15"] } }, - "git_message_prettify": { + "git_merge_analysis_for_ref": { "type": "function", - "file": "message.h", - "line": 38, - "lineto": 38, + "file": "git2/merge.h", + "line": 431, + "lineto": 437, "args": [ { - "name": "out", - "type": "git_buf *", - "comment": "The user-allocated git_buf which will be filled with the\n cleaned up message." + "name": "analysis_out", + "type": "git_merge_analysis_t *", + "comment": "analysis enumeration that the result is written into" }, { - "name": "message", - "type": "const char *", - "comment": "The message to be prettified." + "name": "preference_out", + "type": "git_merge_preference_t *", + "comment": "One of the `git_merge_preference_t` flag." }, { - "name": "strip_comments", - "type": "int", - "comment": "Non-zero to remove comment lines, 0 to leave them in." + "name": "repo", + "type": "git_repository *", + "comment": "the repository to merge" }, { - "name": "comment_char", - "type": "char", - "comment": "Comment character. Lines starting with this character\n are considered to be comments and removed if `strip_comments` is non-zero." - } - ], - "argline": "git_buf *out, const char *message, int strip_comments, char comment_char", - "sig": "git_buf *::const char *::int::char", - "return": { - "type": "int", - "comment": " 0 or an error code." - }, - "description": "

Clean up excess whitespace and make sure there is a trailing newline in the message.

\n", - "comments": "

Optionally, it can remove lines which start with the comment character.

\n", - "group": "message" - }, - "git_message_trailers": { - "type": "function", - "file": "message.h", - "line": 73, - "lineto": 73, - "args": [ + "name": "our_ref", + "type": "git_reference *", + "comment": "the reference to perform the analysis from" + }, { - "name": "arr", - "type": "git_message_trailer_array *", - "comment": "A pre-allocated git_message_trailer_array struct to be filled in\n with any trailers found during parsing." + "name": "their_heads", + "type": "const git_annotated_commit **", + "comment": "the heads to merge into" }, { - "name": "message", - "type": "const char *", - "comment": "The message to be parsed" + "name": "their_heads_len", + "type": "size_t", + "comment": "the number of heads to merge" } ], - "argline": "git_message_trailer_array *arr, const char *message", - "sig": "git_message_trailer_array *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or non-zero on error." - }, - "description": "

Parse trailers out of a message, filling the array pointed to by +arr+.

\n", - "comments": "

Trailers are key/value pairs in the last paragraph of a message, not including any patches or conflicts that may be present.

\n", - "group": "message" + "argline": "git_merge_analysis_t *analysis_out, git_merge_preference_t *preference_out, git_repository *repo, git_reference *our_ref, const git_annotated_commit **their_heads, size_t their_heads_len", + "sig": "git_merge_analysis_t *::git_merge_preference_t *::git_repository *::git_reference *::const git_annotated_commit **::size_t", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Analyzes the given branch(es) and determines the opportunities for\n merging them into a reference.

\n", + "comments": "", + "group": "merge" }, - "git_message_trailer_array_free": { + "git_merge_base": { "type": "function", - "file": "message.h", - "line": 79, - "lineto": 79, + "file": "git2/merge.h", + "line": 448, + "lineto": 452, "args": [ { - "name": "arr", - "type": "git_message_trailer_array *", - "comment": null + "name": "out", + "type": "git_oid *", + "comment": "the OID of a merge base between 'one' and 'two'" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the commits exist" + }, + { + "name": "one", + "type": "const git_oid *", + "comment": "one of the commits" + }, + { + "name": "two", + "type": "const git_oid *", + "comment": "the other commit" } ], - "argline": "git_message_trailer_array *arr", - "sig": "git_message_trailer_array *", + "argline": "git_oid *out, git_repository *repo, const git_oid *one, const git_oid *two", + "sig": "git_oid *::git_repository *::const git_oid *::const git_oid *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND if not found or error code" }, - "description": "

Clean's up any allocated memory in the git_message_trailer_array filled by\n a call to git_message_trailers.

\n", + "description": "

Find a merge base between two commits

\n", "comments": "", - "group": "message" + "group": "merge", + "examples": { + "log.c": ["ex/v1.9.1/log.html#git_merge_base-31"], + "rev-parse.c": ["ex/v1.9.1/rev-parse.html#git_merge_base-1"] + } }, - "git_note_iterator_new": { + "git_merge_bases": { "type": "function", - "file": "notes.h", - "line": 49, - "lineto": 52, + "file": "git2/merge.h", + "line": 463, + "lineto": 467, "args": [ { "name": "out", - "type": "git_note_iterator **", - "comment": "pointer to the iterator" + "type": "git_oidarray *", + "comment": "array in which to store the resulting ids" }, { "name": "repo", "type": "git_repository *", - "comment": "repository where to look up the note" + "comment": "the repository where the commits exist" }, { - "name": "notes_ref", - "type": "const char *", - "comment": "canonical name of the reference to use (optional); defaults to\n \"refs/notes/commits\"" + "name": "one", + "type": "const git_oid *", + "comment": "one of the commits" + }, + { + "name": "two", + "type": "const git_oid *", + "comment": "the other commit" } ], - "argline": "git_note_iterator **out, git_repository *repo, const char *notes_ref", - "sig": "git_note_iterator **::git_repository *::const char *", + "argline": "git_oidarray *out, git_repository *repo, const git_oid *one, const git_oid *two", + "sig": "git_oidarray *::git_repository *::const git_oid *::const git_oid *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 0 on success, GIT_ENOTFOUND if not found or error code" }, - "description": "

Creates a new iterator for notes

\n", - "comments": "

The iterator must be freed manually by the user.

\n", - "group": "note" + "description": "

Find merge bases between two commits

\n", + "comments": "", + "group": "merge" }, - "git_note_commit_iterator_new": { + "git_merge_base_many": { "type": "function", - "file": "notes.h", - "line": 64, - "lineto": 66, + "file": "git2/merge.h", + "line": 478, + "lineto": 482, "args": [ { "name": "out", - "type": "git_note_iterator **", - "comment": "pointer to the iterator" + "type": "git_oid *", + "comment": "the OID of a merge base considering all the commits" }, { - "name": "notes_commit", - "type": "git_commit *", - "comment": "a pointer to the notes commit object" - } - ], - "argline": "git_note_iterator **out, git_commit *notes_commit", - "sig": "git_note_iterator **::git_commit *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Creates a new iterator for notes from a commit

\n", - "comments": "

The iterator must be freed manually by the user.

\n", - "group": "note" - }, - "git_note_iterator_free": { - "type": "function", - "file": "notes.h", - "line": 73, - "lineto": 73, - "args": [ - { - "name": "it", - "type": "git_note_iterator *", - "comment": "pointer to the iterator" - } - ], - "argline": "git_note_iterator *it", - "sig": "git_note_iterator *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Frees an git_note_iterator

\n", - "comments": "", - "group": "note" - }, - "git_note_next": { - "type": "function", - "file": "notes.h", - "line": 86, - "lineto": 89, - "args": [ - { - "name": "note_id", - "type": "git_oid *", - "comment": "id of blob containing the message" + "name": "repo", + "type": "git_repository *", + "comment": "the repository where the commits exist" }, { - "name": "annotated_id", - "type": "git_oid *", - "comment": "id of the git object being annotated" + "name": "length", + "type": "size_t", + "comment": "The number of commits in the provided `input_array`" }, { - "name": "it", - "type": "git_note_iterator *", - "comment": "pointer to the iterator" + "name": "input_array", + "type": "const git_oid []", + "comment": "oids of the commits" } ], - "argline": "git_oid *note_id, git_oid *annotated_id, git_note_iterator *it", - "sig": "git_oid *::git_oid *::git_note_iterator *", + "argline": "git_oid *out, git_repository *repo, size_t length, const git_oid [] input_array", + "sig": "git_oid *::git_repository *::size_t::const git_oid []", "return": { "type": "int", - "comment": " 0 (no error), GIT_ITEROVER (iteration is done) or an error code\n (negative value)" + "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." }, - "description": "

Return the current item (note_id and annotated_id) and advance the iterator\n internally to the next value

\n", + "description": "

Find a merge base given a list of commits

\n", "comments": "", - "group": "note" + "group": "merge" }, - "git_note_read": { + "git_merge_bases_many": { "type": "function", - "file": "notes.h", - "line": 105, - "lineto": 109, + "file": "git2/merge.h", + "line": 524, + "lineto": 528, "args": [ { "name": "out", - "type": "git_note **", - "comment": "pointer to the read note; NULL in case of error" + "type": "git_oidarray *", + "comment": "array in which to store the resulting ids" }, { "name": "repo", "type": "git_repository *", - "comment": "repository where to look up the note" + "comment": "the repository where the commits exist" }, { - "name": "notes_ref", - "type": "const char *", - "comment": "canonical name of the reference to use (optional); defaults to\n \"refs/notes/commits\"" + "name": "length", + "type": "size_t", + "comment": "The number of commits in the provided `input_array`" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to read the note from" + "name": "input_array", + "type": "const git_oid []", + "comment": "oids of the commits" } ], - "argline": "git_note **out, git_repository *repo, const char *notes_ref, const git_oid *oid", - "sig": "git_note **::git_repository *::const char *::const git_oid *", + "argline": "git_oidarray *out, git_repository *repo, size_t length, const git_oid [] input_array", + "sig": "git_oidarray *::git_repository *::size_t::const git_oid []", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." }, - "description": "

Read the note for an object

\n", - "comments": "

The note must be freed manually by the user.

\n", - "group": "note" + "description": "

Find all merge bases given a list of commits

\n", + "comments": "

This behaves similar to git merge-base.

\n\n

Given three commits a, b, and c, merge_base_many will compute a hypothetical commit m, which is a merge between b and c.

\n\n

For example, with the following topology: text o---o---o---o---C / / o---o---o---B / / ---2---1---o---o---o---A

\n\n

the result of merge_base_many given a, b, and c is 1. This is because the equivalent topology with the imaginary merge commit m between b and c is: text o---o---o---o---o / \\ / o---o---o---o---M / / ---2---1---o---o---o---A

\n\n

and the result of merge_base_many given a and m is 1.

\n\n

If you're looking to receive the common ancestor between all the given commits, use merge_base_octopus.

\n", + "group": "merge" }, - "git_note_commit_read": { + "git_merge_base_octopus": { "type": "function", - "file": "notes.h", - "line": 124, - "lineto": 128, + "file": "git2/merge.h", + "line": 539, + "lineto": 543, "args": [ { "name": "out", - "type": "git_note **", - "comment": "pointer to the read note; NULL in case of error" + "type": "git_oid *", + "comment": "the OID of a merge base considering all the commits" }, { "name": "repo", "type": "git_repository *", - "comment": "repository where to look up the note" + "comment": "the repository where the commits exist" }, { - "name": "notes_commit", - "type": "git_commit *", - "comment": "a pointer to the notes commit object" + "name": "length", + "type": "size_t", + "comment": "The number of commits in the provided `input_array`" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to read the note from" + "name": "input_array", + "type": "const git_oid []", + "comment": "oids of the commits" } ], - "argline": "git_note **out, git_repository *repo, git_commit *notes_commit, const git_oid *oid", - "sig": "git_note **::git_repository *::git_commit *::const git_oid *", + "argline": "git_oid *out, git_repository *repo, size_t length, const git_oid [] input_array", + "sig": "git_oid *::git_repository *::size_t::const git_oid []", "return": { "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Read the note for an object from a note commit

\n", - "comments": "

The note must be freed manually by the user.

\n", - "group": "note" - }, - "git_note_author": { - "type": "function", - "file": "notes.h", - "line": 136, - "lineto": 136, - "args": [ - { - "name": "note", - "type": "const git_note *", - "comment": "the note" - } - ], - "argline": "const git_note *note", - "sig": "const git_note *", - "return": { - "type": "const git_signature *", - "comment": " the author" + "comment": " Zero on success; GIT_ENOTFOUND or -1 on failure." }, - "description": "

Get the note author

\n", + "description": "

Find a merge base in preparation for an octopus merge

\n", "comments": "", - "group": "note" + "group": "merge" }, - "git_note_committer": { + "git_merge_file": { "type": "function", - "file": "notes.h", - "line": 144, - "lineto": 144, + "file": "git2/merge.h", + "line": 561, + "lineto": 566, "args": [ { - "name": "note", - "type": "const git_note *", - "comment": "the note" - } - ], - "argline": "const git_note *note", - "sig": "const git_note *", - "return": { - "type": "const git_signature *", - "comment": " the committer" - }, - "description": "

Get the note committer

\n", - "comments": "", - "group": "note" - }, - "git_note_message": { - "type": "function", - "file": "notes.h", - "line": 153, - "lineto": 153, - "args": [ + "name": "out", + "type": "git_merge_file_result *", + "comment": "The git_merge_file_result to be filled in" + }, { - "name": "note", - "type": "const git_note *", - "comment": "the note" - } - ], - "argline": "const git_note *note", - "sig": "const git_note *", - "return": { - "type": "const char *", - "comment": " the note message" - }, - "description": "

Get the note message

\n", - "comments": "", - "group": "note" - }, - "git_note_id": { - "type": "function", - "file": "notes.h", - "line": 162, - "lineto": 162, - "args": [ + "name": "ancestor", + "type": "const git_merge_file_input *", + "comment": "The contents of the ancestor file" + }, + { + "name": "ours", + "type": "const git_merge_file_input *", + "comment": "The contents of the file in \"our\" side" + }, + { + "name": "theirs", + "type": "const git_merge_file_input *", + "comment": "The contents of the file in \"their\" side" + }, { - "name": "note", - "type": "const git_note *", - "comment": "the note" + "name": "opts", + "type": "const git_merge_file_options *", + "comment": "The merge file options or `NULL` for defaults" } ], - "argline": "const git_note *note", - "sig": "const git_note *", - "return": { - "type": "const git_oid *", - "comment": " the note object's id" - }, - "description": "

Get the note object's id

\n", - "comments": "", - "group": "note" + "argline": "git_merge_file_result *out, const git_merge_file_input *ancestor, const git_merge_file_input *ours, const git_merge_file_input *theirs, const git_merge_file_options *opts", + "sig": "git_merge_file_result *::const git_merge_file_input *::const git_merge_file_input *::const git_merge_file_input *::const git_merge_file_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Merge two files as they exist in the in-memory data structures, using\n the given common ancestor as the baseline, producing a\n git_merge_file_result that reflects the merge result. The\n git_merge_file_result must be freed with git_merge_file_result_free.

\n", + "comments": "

Note that this function does not reference a repository and any configuration must be passed as git_merge_file_options.

\n", + "group": "merge" }, - "git_note_create": { + "git_merge_file_from_index": { "type": "function", - "file": "notes.h", - "line": 179, - "lineto": 187, + "file": "git2/merge.h", + "line": 582, + "lineto": 588, "args": [ { "name": "out", - "type": "git_oid *", - "comment": "pointer to store the OID (optional); NULL in case of error" + "type": "git_merge_file_result *", + "comment": "The git_merge_file_result to be filled in" }, { "name": "repo", "type": "git_repository *", - "comment": "repository where to store the note" - }, - { - "name": "notes_ref", - "type": "const char *", - "comment": "canonical name of the reference to use (optional);\n\t\t\t\t\tdefaults to \"refs/notes/commits\"" - }, - { - "name": "author", - "type": "const git_signature *", - "comment": "signature of the notes commit author" + "comment": "The repository" }, { - "name": "committer", - "type": "const git_signature *", - "comment": "signature of the notes commit committer" + "name": "ancestor", + "type": "const git_index_entry *", + "comment": "The index entry for the ancestor file (stage level 1)" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to decorate" + "name": "ours", + "type": "const git_index_entry *", + "comment": "The index entry for our file (stage level 2)" }, { - "name": "note", - "type": "const char *", - "comment": "Content of the note to add for object oid" + "name": "theirs", + "type": "const git_index_entry *", + "comment": "The index entry for their file (stage level 3)" }, { - "name": "force", - "type": "int", - "comment": "Overwrite existing note" + "name": "opts", + "type": "const git_merge_file_options *", + "comment": "The merge file options or NULL" } ], - "argline": "git_oid *out, git_repository *repo, const char *notes_ref, const git_signature *author, const git_signature *committer, const git_oid *oid, const char *note, int force", - "sig": "git_oid *::git_repository *::const char *::const git_signature *::const 
git_signature *::const git_oid *::const char *::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Add a note for an object

\n", + "argline": "git_merge_file_result *out, git_repository *repo, const git_index_entry *ancestor, const git_index_entry *ours, const git_index_entry *theirs, const git_merge_file_options *opts", + "sig": "git_merge_file_result *::git_repository *::const git_index_entry *::const git_index_entry *::const git_index_entry *::const git_merge_file_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Merge two files as they exist in the index, using the given common\n ancestor as the baseline, producing a git_merge_file_result that\n reflects the merge result. The git_merge_file_result must be freed with\n git_merge_file_result_free.

\n", "comments": "", - "group": "note" + "group": "merge" }, - "git_note_commit_create": { + "git_merge_file_result_free": { "type": "function", - "file": "notes.h", - "line": 209, - "lineto": 218, + "file": "git2/merge.h", + "line": 595, + "lineto": 595, "args": [ { - "name": "notes_commit_out", - "type": "git_oid *", - "comment": "pointer to store the commit (optional);\n\t\t\t\t\tNULL in case of error" - }, + "name": "result", + "type": "git_merge_file_result *", + "comment": "The result to free or `NULL`" + } + ], + "argline": "git_merge_file_result *result", + "sig": "git_merge_file_result *", + "return": { "type": "void", "comment": null }, + "description": "

Frees a git_merge_file_result.

\n", + "comments": "", + "group": "merge" + }, + "git_merge_trees": { + "type": "function", + "file": "git2/merge.h", + "line": 613, + "lineto": 619, + "args": [ { - "name": "notes_blob_out", - "type": "git_oid *", - "comment": "a point to the id of a note blob (optional)" + "name": "out", + "type": "git_index **", + "comment": "pointer to store the index result in" }, { "name": "repo", "type": "git_repository *", - "comment": "repository where the note will live" + "comment": "repository that contains the given trees" }, { - "name": "parent", - "type": "git_commit *", - "comment": "Pointer to parent note\n\t\t\t\t\tor NULL if this shall start a new notes tree" + "name": "ancestor_tree", + "type": "const git_tree *", + "comment": "the common ancestor between the trees (or null if none)" }, { - "name": "author", - "type": "const git_signature *", - "comment": "signature of the notes commit author" + "name": "our_tree", + "type": "const git_tree *", + "comment": "the tree that reflects the destination tree" }, { - "name": "committer", - "type": "const git_signature *", - "comment": "signature of the notes commit committer" + "name": "their_tree", + "type": "const git_tree *", + "comment": "the tree to merge in to `our_tree`" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to decorate" + "name": "opts", + "type": "const git_merge_options *", + "comment": "the merge tree options (or null for defaults)" + } + ], + "argline": "git_index **out, git_repository *repo, const git_tree *ancestor_tree, const git_tree *our_tree, const git_tree *their_tree, const git_merge_options *opts", + "sig": "git_index **::git_repository *::const git_tree *::const git_tree *::const git_tree *::const git_merge_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Merge two trees, producing a git_index that reflects the result of\n the merge. The index may be written as-is to the working directory\n or checked out. If the index is to be converted to a tree, the caller\n should resolve any conflicts that arose as part of the merge.

\n", + "comments": "

The returned index must be freed explicitly with git_index_free.

\n", + "group": "merge" + }, + "git_merge_commits": { + "type": "function", + "file": "git2/merge.h", + "line": 636, + "lineto": 641, + "args": [ + { + "name": "out", + "type": "git_index **", + "comment": "pointer to store the index result in" }, { - "name": "note", - "type": "const char *", - "comment": "Content of the note to add for object oid" + "name": "repo", + "type": "git_repository *", + "comment": "repository that contains the given trees" }, { - "name": "allow_note_overwrite", - "type": "int", - "comment": "Overwrite existing note" + "name": "our_commit", + "type": "const git_commit *", + "comment": "the commit that reflects the destination tree" + }, + { + "name": "their_commit", + "type": "const git_commit *", + "comment": "the commit to merge in to `our_commit`" + }, + { + "name": "opts", + "type": "const git_merge_options *", + "comment": "the merge tree options (or null for defaults)" } ], - "argline": "git_oid *notes_commit_out, git_oid *notes_blob_out, git_repository *repo, git_commit *parent, const git_signature *author, const git_signature *committer, const git_oid *oid, const char *note, int allow_note_overwrite", - "sig": "git_oid *::git_oid *::git_repository *::git_commit *::const git_signature *::const git_signature *::const git_oid *::const char *::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Add a note for an object from a commit

\n", - "comments": "

This function will create a notes commit for a given object, the commit is a dangling commit, no reference is created.

\n", - "group": "note" + "argline": "git_index **out, git_repository *repo, const git_commit *our_commit, const git_commit *their_commit, const git_merge_options *opts", + "sig": "git_index **::git_repository *::const git_commit *::const git_commit *::const git_merge_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Merge two commits, producing a git_index that reflects the result of\n the merge. The index may be written as-is to the working directory\n or checked out. If the index is to be converted to a tree, the caller\n should resolve any conflicts that arose as part of the merge.

\n", + "comments": "

The returned index must be freed explicitly with git_index_free.

\n", + "group": "merge" }, - "git_note_remove": { + "git_merge": { "type": "function", - "file": "notes.h", - "line": 232, - "lineto": 237, + "file": "git2/merge.h", + "line": 661, + "lineto": 666, "args": [ { "name": "repo", "type": "git_repository *", - "comment": "repository where the note lives" + "comment": "the repository to merge" }, { - "name": "notes_ref", - "type": "const char *", - "comment": "canonical name of the reference to use (optional);\n\t\t\t\t\tdefaults to \"refs/notes/commits\"" + "name": "their_heads", + "type": "const git_annotated_commit **", + "comment": "the heads to merge into" }, { - "name": "author", - "type": "const git_signature *", - "comment": "signature of the notes commit author" + "name": "their_heads_len", + "type": "size_t", + "comment": "the number of heads to merge" }, { - "name": "committer", - "type": "const git_signature *", - "comment": "signature of the notes commit committer" + "name": "merge_opts", + "type": "const git_merge_options *", + "comment": "merge options" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to remove the note from" + "name": "checkout_opts", + "type": "const git_checkout_options *", + "comment": "checkout options" } ], - "argline": "git_repository *repo, const char *notes_ref, const git_signature *author, const git_signature *committer, const git_oid *oid", - "sig": "git_repository *::const char *::const git_signature *::const git_signature *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Remove the note for an object

\n", - "comments": "", - "group": "note" + "argline": "git_repository *repo, const git_annotated_commit **their_heads, size_t their_heads_len, const git_merge_options *merge_opts, const git_checkout_options *checkout_opts", + "sig": "git_repository *::const git_annotated_commit **::size_t::const git_merge_options *::const git_checkout_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Merges the given commit(s) into HEAD, writing the results into the working\n directory. Any changes are staged for commit and any conflicts are written\n to the index. Callers should inspect the repository's index after this\n completes, resolve any conflicts and prepare a commit.

\n", + "comments": "

For compatibility with git, the repository is put into a merging state. Once the commit is done (or if the user wishes to abort), you should clear this state by calling git_repository_state_cleanup().

\n", + "group": "merge", + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_merge-16"] } }, - "git_note_commit_remove": { + "git_message_prettify": { "type": "function", - "file": "notes.h", - "line": 257, - "lineto": 263, + "file": "git2/message.h", + "line": 38, + "lineto": 38, "args": [ { - "name": "notes_commit_out", - "type": "git_oid *", - "comment": "pointer to store the new notes commit (optional);\n\t\t\t\t\tNULL in case of error.\n\t\t\t\t\tWhen removing a note a new tree containing all notes\n\t\t\t\t\tsans the note to be removed is created and a new commit\n\t\t\t\t\tpointing to that tree is also created.\n\t\t\t\t\tIn the case where the resulting tree is an empty tree\n\t\t\t\t\ta new commit pointing to this empty tree will be returned." + "name": "out", + "type": "git_buf *", + "comment": "The user-allocated git_buf which will be filled with the\n cleaned up message." }, { - "name": "repo", - "type": "git_repository *", - "comment": "repository where the note lives" + "name": "message", + "type": "const char *", + "comment": "The message to be prettified." }, { - "name": "notes_commit", - "type": "git_commit *", - "comment": "a pointer to the notes commit object" + "name": "strip_comments", + "type": "int", + "comment": "Non-zero to remove comment lines, 0 to leave them in." }, { - "name": "author", - "type": "const git_signature *", - "comment": "signature of the notes commit author" - }, + "name": "comment_char", + "type": "char", + "comment": "Comment character. Lines starting with this character\n are considered to be comments and removed if `strip_comments` is non-zero." + } + ], + "argline": "git_buf *out, const char *message, int strip_comments, char comment_char", + "sig": "git_buf *::const char *::int::char", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Clean up excess whitespace and make sure there is a trailing newline in the message.

\n", + "comments": "

Optionally, it can remove lines which start with the comment character.

\n", + "group": "message" + }, + "git_message_trailers": { + "type": "function", + "file": "git2/message.h", + "line": 73, + "lineto": 73, + "args": [ { - "name": "committer", - "type": "const git_signature *", - "comment": "signature of the notes commit committer" + "name": "arr", + "type": "git_message_trailer_array *", + "comment": "A pre-allocated git_message_trailer_array struct to be filled in\n with any trailers found during parsing." }, { - "name": "oid", - "type": "const git_oid *", - "comment": "OID of the git object to remove the note from" + "name": "message", + "type": "const char *", + "comment": "The message to be parsed" } ], - "argline": "git_oid *notes_commit_out, git_repository *repo, git_commit *notes_commit, const git_signature *author, const git_signature *committer, const git_oid *oid", - "sig": "git_oid *::git_repository *::git_commit *::const git_signature *::const git_signature *::const git_oid *", + "argline": "git_message_trailer_array *arr, const char *message", + "sig": "git_message_trailer_array *::const char *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 0 on success, or non-zero on error." }, - "description": "

Remove the note for an object

\n", + "description": "

Parse trailers out of a message, filling the array pointed to by +arr+.

\n", + "comments": "

Trailers are key/value pairs in the last paragraph of a message, not including any patches or conflicts that may be present.

\n", + "group": "message" + }, + "git_message_trailer_array_free": { + "type": "function", + "file": "git2/message.h", + "line": 81, + "lineto": 81, + "args": [ + { + "name": "arr", + "type": "git_message_trailer_array *", + "comment": "The trailer to free." + } + ], + "argline": "git_message_trailer_array *arr", + "sig": "git_message_trailer_array *", + "return": { "type": "void", "comment": null }, + "description": "

Cleans up any allocated memory in the git_message_trailer_array filled by\n a call to git_message_trailers.

\n", "comments": "", - "group": "note" + "group": "message" }, - "git_note_free": { + "git_note_iterator_free": { "type": "function", - "file": "notes.h", - "line": 270, - "lineto": 270, + "file": "git2/notes.h", + "line": 75, + "lineto": 75, "args": [ { - "name": "note", - "type": "git_note *", - "comment": "git_note object" + "name": "it", + "type": "git_note_iterator *", + "comment": "pointer to the iterator" } ], - "argline": "git_note *note", - "sig": "git_note *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free a git_note object

\n", + "argline": "git_note_iterator *it", + "sig": "git_note_iterator *", + "return": { "type": "void", "comment": null }, + "description": "

Frees a git_note_iterator

\n", "comments": "", "group": "note" }, - "git_note_foreach": { + "git_note_next": { "type": "function", - "file": "notes.h", - "line": 298, - "lineto": 302, + "file": "git2/notes.h", + "line": 88, + "lineto": 91, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "Repository where to find the notes." - }, - { - "name": "notes_ref", - "type": "const char *", - "comment": "Reference to read from (optional); defaults to\n \"refs/notes/commits\"." + "name": "note_id", + "type": "git_oid *", + "comment": "id of blob containing the message" }, { - "name": "note_cb", - "type": "git_note_foreach_cb", - "comment": "Callback to invoke per found annotation. Return non-zero\n to stop looping." + "name": "annotated_id", + "type": "git_oid *", + "comment": "id of the git object being annotated" }, { - "name": "payload", - "type": "void *", - "comment": "Extra parameter to callback function." + "name": "it", + "type": "git_note_iterator *", + "comment": "pointer to the iterator" } ], - "argline": "git_repository *repo, const char *notes_ref, git_note_foreach_cb note_cb, void *payload", - "sig": "git_repository *::const char *::git_note_foreach_cb::void *", + "argline": "git_oid *note_id, git_oid *annotated_id, git_note_iterator *it", + "sig": "git_oid *::git_oid *::git_note_iterator *", "return": { "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" + "comment": " 0 (no error), GIT_ITEROVER (iteration is done) or an error code\n (negative value)" }, - "description": "

Loop over all the notes within a specified namespace\n and issue a callback for each one.

\n", + "description": "

Return the current item (note_id and annotated_id) and advance the iterator\n internally to the next value

\n", "comments": "", "group": "note" }, "git_object_lookup": { "type": "function", - "file": "object.h", - "line": 42, - "lineto": 46, + "file": "git2/object.h", + "line": 45, + "lineto": 49, "args": [ { "name": "object", @@ -10285,33 +10792,26 @@ }, { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "the type of the object" } ], - "argline": "git_object **object, git_repository *repo, const git_oid *id, git_otype type", - "sig": "git_object **::git_repository *::const git_oid *::git_otype", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_object **object, git_repository *repo, const git_oid *id, git_object_t type", + "sig": "git_object **::git_repository *::const git_oid *::git_object_t", + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a reference to one of the objects in a repository.

\n", - "comments": "

The generated reference is owned by the repository and should be closed with the git_object_free method instead of free'd manually.

\n\n

The 'type' parameter must match the type of the object in the odb; the method will fail otherwise. The special value 'GIT_OBJ_ANY' may be passed to let the method guess the object's type.

\n", + "comments": "

The generated reference is owned by the repository and should be closed with the git_object_free method instead of free'd manually.

\n\n

The 'type' parameter must match the type of the object in the odb; the method will fail otherwise. The special value 'GIT_OBJECT_ANY' may be passed to let the method guess the object's type.

\n", "group": "object", "examples": { - "log.c": [ - "ex/HEAD/log.html#git_object_lookup-34" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_object_lookup-21" - ] + "log.c": ["ex/v1.9.1/log.html#git_object_lookup-32"], + "merge.c": ["ex/v1.9.1/merge.html#git_object_lookup-17"] } }, "git_object_lookup_prefix": { "type": "function", - "file": "object.h", - "line": 75, - "lineto": 80, + "file": "git2/object.h", + "line": 78, + "lineto": 83, "args": [ { "name": "object_out", @@ -10335,25 +10835,22 @@ }, { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "the type of the object" } ], - "argline": "git_object **object_out, git_repository *repo, const git_oid *id, size_t len, git_otype type", - "sig": "git_object **::git_repository *::const git_oid *::size_t::git_otype", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_object **object_out, git_repository *repo, const git_oid *id, size_t len, git_object_t type", + "sig": "git_object **::git_repository *::const git_oid *::size_t::git_object_t", + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a reference to one of the objects in a repository,\n given a prefix of its identifier (short id).

\n", - "comments": "

The object obtained will be so that its identifier matches the first 'len' hexadecimal characters (packets of 4 bits) of the given 'id'. 'len' must be at least GIT_OID_MINPREFIXLEN, and long enough to identify a unique object matching the prefix; otherwise the method will fail.

\n\n

The generated reference is owned by the repository and should be closed with the git_object_free method instead of free'd manually.

\n\n

The 'type' parameter must match the type of the object in the odb; the method will fail otherwise. The special value 'GIT_OBJ_ANY' may be passed to let the method guess the object's type.

\n", + "comments": "

The object obtained will be so that its identifier matches the first 'len' hexadecimal characters (packets of 4 bits) of the given id. len must be at least GIT_OID_MINPREFIXLEN, and long enough to identify a unique object matching the prefix; otherwise the method will fail.

\n\n

The generated reference is owned by the repository and should be closed with the git_object_free method instead of free'd manually.

\n\n

The type parameter must match the type of the object in the odb; the method will fail otherwise. The special value GIT_OBJECT_ANY may be passed to let the method guess the object's type.

\n", "group": "object" }, "git_object_lookup_bypath": { "type": "function", - "file": "object.h", - "line": 93, - "lineto": 97, + "file": "git2/object.h", + "line": 96, + "lineto": 100, "args": [ { "name": "out", @@ -10372,25 +10869,22 @@ }, { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "type of object desired" } ], - "argline": "git_object **out, const git_object *treeish, const char *path, git_otype type", - "sig": "git_object **::const git_object *::const char *::git_otype", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "argline": "git_object **out, const git_object *treeish, const char *path, git_object_t type", + "sig": "git_object **::const git_object *::const char *::git_object_t", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Lookup an object that represents a tree entry.

\n", "comments": "", "group": "object" }, "git_object_id": { "type": "function", - "file": "object.h", - "line": 105, - "lineto": 105, + "file": "git2/object.h", + "line": 108, + "lineto": 108, "args": [ { "name": "obj", @@ -10400,44 +10894,41 @@ ], "argline": "const git_object *obj", "sig": "const git_object *", - "return": { - "type": "const git_oid *", - "comment": " the SHA1 id" - }, + "return": { "type": "const git_oid *", "comment": " the SHA1 id" }, "description": "

Get the id (SHA1) of a repository object

\n", "comments": "", "group": "object", "examples": { "blame.c": [ - "ex/HEAD/blame.html#git_object_id-10", - "ex/HEAD/blame.html#git_object_id-11", - "ex/HEAD/blame.html#git_object_id-12", - "ex/HEAD/blame.html#git_object_id-13" + "ex/v1.9.1/blame.html#git_object_id-7", + "ex/v1.9.1/blame.html#git_object_id-8", + "ex/v1.9.1/blame.html#git_object_id-9", + "ex/v1.9.1/blame.html#git_object_id-10" ], "cat-file.c": [ - "ex/HEAD/cat-file.html#git_object_id-12", - "ex/HEAD/cat-file.html#git_object_id-13" + "ex/v1.9.1/cat-file.html#git_object_id-10", + "ex/v1.9.1/cat-file.html#git_object_id-11" ], "log.c": [ - "ex/HEAD/log.html#git_object_id-35", - "ex/HEAD/log.html#git_object_id-36", - "ex/HEAD/log.html#git_object_id-37", - "ex/HEAD/log.html#git_object_id-38" + "ex/v1.9.1/log.html#git_object_id-33", + "ex/v1.9.1/log.html#git_object_id-34", + "ex/v1.9.1/log.html#git_object_id-35", + "ex/v1.9.1/log.html#git_object_id-36" ], "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_object_id-4", - "ex/HEAD/rev-parse.html#git_object_id-5", - "ex/HEAD/rev-parse.html#git_object_id-6", - "ex/HEAD/rev-parse.html#git_object_id-7", - "ex/HEAD/rev-parse.html#git_object_id-8" + "ex/v1.9.1/rev-parse.html#git_object_id-2", + "ex/v1.9.1/rev-parse.html#git_object_id-3", + "ex/v1.9.1/rev-parse.html#git_object_id-4", + "ex/v1.9.1/rev-parse.html#git_object_id-5", + "ex/v1.9.1/rev-parse.html#git_object_id-6" ] } }, "git_object_short_id": { "type": "function", - "file": "object.h", - "line": 119, - "lineto": 119, + "file": "git2/object.h", + "line": 122, + "lineto": 122, "args": [ { "name": "out", @@ -10452,24 +10943,17 @@ ], "argline": "git_buf *out, const git_object *obj", "sig": "git_buf *::const git_object *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 for error" - }, + "return": { "type": "int", "comment": " 0 on success, \n<\n0 for error" }, "description": "

Get a short abbreviated OID string for the object

\n", "comments": "

This starts at the "core.abbrev" length (default 7 characters) and iteratively extends to a longer string if that length is ambiguous. The result will be unambiguous (at least until new objects are added to the repository).

\n", "group": "object", - "examples": { - "tag.c": [ - "ex/HEAD/tag.html#git_object_short_id-5" - ] - } + "examples": { "tag.c": ["ex/v1.9.1/tag.html#git_object_short_id-3"] } }, "git_object_type": { "type": "function", - "file": "object.h", - "line": 127, - "lineto": 127, + "file": "git2/object.h", + "line": 130, + "lineto": 130, "args": [ { "name": "obj", @@ -10479,35 +10963,26 @@ ], "argline": "const git_object *obj", "sig": "const git_object *", - "return": { - "type": "git_otype", - "comment": " the object's type" - }, + "return": { "type": "git_object_t", "comment": " the object's type" }, "description": "

Get the object type of an object

\n", "comments": "", "group": "object", "examples": { "cat-file.c": [ - "ex/HEAD/cat-file.html#git_object_type-14", - "ex/HEAD/cat-file.html#git_object_type-15", - "ex/HEAD/cat-file.html#git_object_type-16" + "ex/v1.9.1/cat-file.html#git_object_type-12", + "ex/v1.9.1/cat-file.html#git_object_type-13", + "ex/v1.9.1/cat-file.html#git_object_type-14" ], - "tag.c": [ - "ex/HEAD/tag.html#git_object_type-6" - ] + "tag.c": ["ex/v1.9.1/tag.html#git_object_type-4"] } }, "git_object_owner": { "type": "function", - "file": "object.h", - "line": 141, - "lineto": 141, + "file": "git2/object.h", + "line": 144, + "lineto": 144, "args": [ - { - "name": "obj", - "type": "const git_object *", - "comment": "the object" - } + { "name": "obj", "type": "const git_object *", "comment": "the object" } ], "argline": "const git_object *obj", "sig": "const git_object *", @@ -10521,9 +10996,9 @@ }, "git_object_free": { "type": "function", - "file": "object.h", - "line": 158, - "lineto": 158, + "file": "git2/object.h", + "line": 161, + "lineto": 161, "args": [ { "name": "object", @@ -10533,59 +11008,49 @@ ], "argline": "git_object *object", "sig": "git_object *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open object

\n", "comments": "

This method instructs the library to close an existing object; note that git_objects are owned and cached by the repository so the object may or may not be freed after this library call, depending on how aggressive is the caching mechanism used by the repository.

\n\n

IMPORTANT: It is necessary to call this method when you stop using an object. Failure to do so will cause a memory leak.

\n", "group": "object", "examples": { "blame.c": [ - "ex/HEAD/blame.html#git_object_free-14", - "ex/HEAD/blame.html#git_object_free-15", - "ex/HEAD/blame.html#git_object_free-16", - "ex/HEAD/blame.html#git_object_free-17" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_object_free-17" - ], - "general.c": [ - "ex/HEAD/general.html#git_object_free-38" - ], - "log.c": [ - "ex/HEAD/log.html#git_object_free-39" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_object_free-22" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_object_free-9", - "ex/HEAD/rev-parse.html#git_object_free-10", - "ex/HEAD/rev-parse.html#git_object_free-11" + "ex/v1.9.1/blame.html#git_object_free-11", + "ex/v1.9.1/blame.html#git_object_free-12", + "ex/v1.9.1/blame.html#git_object_free-13", + "ex/v1.9.1/blame.html#git_object_free-14" + ], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_object_free-15"], + "commit.c": ["ex/v1.9.1/commit.html#git_object_free-6"], + "general.c": ["ex/v1.9.1/general.html#git_object_free-38"], + "log.c": ["ex/v1.9.1/log.html#git_object_free-37"], + "merge.c": ["ex/v1.9.1/merge.html#git_object_free-18"], + "rev-parse.c": [ + "ex/v1.9.1/rev-parse.html#git_object_free-7", + "ex/v1.9.1/rev-parse.html#git_object_free-8", + "ex/v1.9.1/rev-parse.html#git_object_free-9" ], "tag.c": [ - "ex/HEAD/tag.html#git_object_free-7", - "ex/HEAD/tag.html#git_object_free-8", - "ex/HEAD/tag.html#git_object_free-9", - "ex/HEAD/tag.html#git_object_free-10" + "ex/v1.9.1/tag.html#git_object_free-5", + "ex/v1.9.1/tag.html#git_object_free-6", + "ex/v1.9.1/tag.html#git_object_free-7", + "ex/v1.9.1/tag.html#git_object_free-8" ] } }, "git_object_type2string": { "type": "function", - "file": "object.h", - "line": 169, - "lineto": 169, + "file": "git2/object.h", + "line": 172, + "lineto": 172, "args": [ { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "object type to convert." 
} ], - "argline": "git_otype type", - "sig": "git_otype", + "argline": "git_object_t type", + "sig": "git_object_t", "return": { "type": "const char *", "comment": " the corresponding string representation." @@ -10595,22 +11060,22 @@ "group": "object", "examples": { "cat-file.c": [ - "ex/HEAD/cat-file.html#git_object_type2string-18", - "ex/HEAD/cat-file.html#git_object_type2string-19", - "ex/HEAD/cat-file.html#git_object_type2string-20", - "ex/HEAD/cat-file.html#git_object_type2string-21" + "ex/v1.9.1/cat-file.html#git_object_type2string-16", + "ex/v1.9.1/cat-file.html#git_object_type2string-17", + "ex/v1.9.1/cat-file.html#git_object_type2string-18", + "ex/v1.9.1/cat-file.html#git_object_type2string-19" ], "general.c": [ - "ex/HEAD/general.html#git_object_type2string-39", - "ex/HEAD/general.html#git_object_type2string-40" + "ex/v1.9.1/general.html#git_object_type2string-39", + "ex/v1.9.1/general.html#git_object_type2string-40" ] } }, "git_object_string2type": { "type": "function", - "file": "object.h", - "line": 177, - "lineto": 177, + "file": "git2/object.h", + "line": 180, + "lineto": 180, "args": [ { "name": "str", @@ -10621,62 +11086,40 @@ "argline": "const char *str", "sig": "const char *", "return": { - "type": "git_otype", - "comment": " the corresponding git_otype." + "type": "git_object_t", + "comment": " the corresponding git_object_t." }, - "description": "

Convert a string object type representation to it's git_otype.

\n", + "description": "

Convert a string object type representation to its git_object_t.

\n", "comments": "", "group": "object" }, "git_object_typeisloose": { "type": "function", - "file": "object.h", - "line": 186, - "lineto": 186, + "file": "git2/object.h", + "line": 189, + "lineto": 189, "args": [ { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "object type to test." } ], - "argline": "git_otype type", - "sig": "git_otype", + "argline": "git_object_t type", + "sig": "git_object_t", "return": { "type": "int", "comment": " true if the type represents a valid loose object type,\n false otherwise." }, - "description": "

Determine if the given git_otype is a valid loose object type.

\n", + "description": "

Determine if the given git_object_t is a valid loose object type.

\n", "comments": "", "group": "object" }, - "git_object__size": { - "type": "function", - "file": "object.h", - "line": 200, - "lineto": 200, - "args": [ - { - "name": "type", - "type": "git_otype", - "comment": "object type to get its size" - } - ], - "argline": "git_otype type", - "sig": "git_otype", - "return": { - "type": "size_t", - "comment": " size in bytes of the object" - }, - "description": "

Get the size in bytes for the structure which\n acts as an in-memory representation of any given\n object type.

\n", - "comments": "

For all the core types, this would the equivalent of calling sizeof(git_commit) if the core types were not opaque on the external API.

\n", - "group": "object" - }, "git_object_peel": { "type": "function", - "file": "object.h", - "line": 225, - "lineto": 228, + "file": "git2/object.h", + "line": 214, + "lineto": 217, "args": [ { "name": "peeled", @@ -10690,25 +11133,25 @@ }, { "name": "target_type", - "type": "git_otype", - "comment": "The type of the requested object (a GIT_OBJ_ value)" + "type": "git_object_t", + "comment": "The type of the requested object (a GIT_OBJECT_ value)" } ], - "argline": "git_object **peeled, const git_object *object, git_otype target_type", - "sig": "git_object **::const git_object *::git_otype", + "argline": "git_object **peeled, const git_object *object, git_object_t target_type", + "sig": "git_object **::const git_object *::git_object_t", "return": { "type": "int", "comment": " 0 on success, GIT_EINVALIDSPEC, GIT_EPEEL, or an error code" }, "description": "

Recursively peel an object until an object of the specified type is met.

\n", - "comments": "

If the query cannot be satisfied due to the object model, GIT_EINVALIDSPEC will be returned (e.g. trying to peel a blob to a tree).

\n\n

If you pass GIT_OBJ_ANY as the target type, then the object will be peeled until the type changes. A tag will be peeled until the referenced object is no longer a tag, and a commit will be peeled to a tree. Any other object type will return GIT_EINVALIDSPEC.

\n\n

If peeling a tag we discover an object which cannot be peeled to the target type due to the object model, GIT_EPEEL will be returned.

\n\n

You must free the returned object.

\n", + "comments": "

If the query cannot be satisfied due to the object model, GIT_EINVALIDSPEC will be returned (e.g. trying to peel a blob to a tree).

\n\n

If you pass GIT_OBJECT_ANY as the target type, then the object will be peeled until the type changes. A tag will be peeled until the referenced object is no longer a tag, and a commit will be peeled to a tree. Any other object type will return GIT_EINVALIDSPEC.

\n\n

If peeling a tag we discover an object which cannot be peeled to the target type due to the object model, GIT_EPEEL will be returned.

\n\n

You must free the returned object.

\n", "group": "object" }, "git_object_dup": { "type": "function", - "file": "object.h", - "line": 237, - "lineto": 237, + "file": "git2/object.h", + "line": 227, + "lineto": 227, "args": [ { "name": "dest", @@ -10723,44 +11166,72 @@ ], "argline": "git_object **dest, git_object *source", "sig": "git_object **::git_object *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create an in-memory copy of a Git object. The copy must be\n explicitly free'd or it will leak.

\n", "comments": "", "group": "object" }, + "git_object_rawcontent_is_valid": { + "type": "function", + "file": "git2/object.h", + "line": 270, + "lineto": 274, + "args": [ + { + "name": "valid", + "type": "int *", + "comment": "Output pointer to set with validity of the object content" + }, + { + "name": "buf", + "type": "const char *", + "comment": "The contents to validate" + }, + { + "name": "len", + "type": "size_t", + "comment": "The length of the buffer" + }, + { + "name": "object_type", + "type": "git_object_t", + "comment": "The type of the object in the buffer" + } + ], + "argline": "int *valid, const char *buf, size_t len, git_object_t object_type", + "sig": "int *::const char *::size_t::git_object_t", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Analyzes a buffer of raw object content and determines its validity.\n Tree, commit, and tag objects will be parsed and ensured that they\n are valid, parseable content. (Blobs are always valid by definition.)\n An error message will be set with an informative message if the object\n is not valid.

\n", + "comments": "", + "group": "object" + }, "git_odb_new": { "type": "function", - "file": "odb.h", - "line": 39, - "lineto": 39, + "file": "git2/odb.h", + "line": 102, + "lineto": 102, "args": [ { - "name": "out", + "name": "odb", "type": "git_odb **", - "comment": "location to store the database pointer, if opened.\n\t\t\tSet to NULL if the open failed." + "comment": "location to store the database pointer, if opened." } ], - "argline": "git_odb **out", + "argline": "git_odb **odb", "sig": "git_odb **", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a new object database with no backends.

\n", "comments": "

Before the ODB can be used for read/writing, a custom database backend must be manually added using git_odb_add_backend()

\n", "group": "odb" }, "git_odb_open": { "type": "function", - "file": "odb.h", - "line": 57, - "lineto": 57, + "file": "git2/odb.h", + "line": 120, + "lineto": 120, "args": [ { - "name": "out", + "name": "odb_out", "type": "git_odb **", "comment": "location to store the database pointer, if opened.\n\t\t\tSet to NULL if the open failed." }, @@ -10770,21 +11241,18 @@ "comment": "path of the backends' \"objects\" directory." } ], - "argline": "git_odb **out, const char *objects_dir", + "argline": "git_odb **odb_out, const char *objects_dir", "sig": "git_odb **::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a new object database and automatically add\n the two default backends:

\n", "comments": "
- git_odb_backend_loose: read and write loose object files      from disk, assuming `objects_dir` as the Objects folder\n\n- git_odb_backend_pack: read objects from packfiles,        assuming `objects_dir` as the Objects folder which      contains a 'pack/' folder with the corresponding data\n
\n", "group": "odb" }, "git_odb_add_disk_alternate": { "type": "function", - "file": "odb.h", - "line": 74, - "lineto": 74, + "file": "git2/odb.h", + "line": 138, + "lineto": 138, "args": [ { "name": "odb", @@ -10801,7 +11269,7 @@ "sig": "git_odb *::const char *", "return": { "type": "int", - "comment": " 0 on success; error code otherwise" + "comment": " 0 on success, error code otherwise" }, "description": "

Add an on-disk alternate to an existing Object DB.

\n", "comments": "

Note that the added path must point to an objects, not to a full repository, to use it as an alternate store.

\n\n

Alternate backends are always checked for objects after all the main backends have been exhausted.

\n\n

Writing is disabled on alternate backends.

\n", @@ -10809,9 +11277,9 @@ }, "git_odb_free": { "type": "function", - "file": "odb.h", - "line": 81, - "lineto": 81, + "file": "git2/odb.h", + "line": 145, + "lineto": 145, "args": [ { "name": "db", @@ -10821,30 +11289,23 @@ ], "argline": "git_odb *db", "sig": "git_odb *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open object database.

\n", "comments": "", "group": "odb", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_odb_free-22" - ], - "general.c": [ - "ex/HEAD/general.html#git_odb_free-41" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_odb_free-20"], + "general.c": ["ex/v1.9.1/general.html#git_odb_free-41"] } }, "git_odb_read": { "type": "function", - "file": "odb.h", - "line": 100, - "lineto": 100, + "file": "git2/odb.h", + "line": 163, + "lineto": 163, "args": [ { - "name": "out", + "name": "obj", "type": "git_odb_object **", "comment": "pointer where to store the read object" }, @@ -10859,32 +11320,28 @@ "comment": "identity of the object to read." } ], - "argline": "git_odb_object **out, git_odb *db, const git_oid *id", + "argline": "git_odb_object **obj, git_odb *db, const git_oid *id", "sig": "git_odb_object **::git_odb *::const git_oid *", "return": { "type": "int", - "comment": " - 0 if the object was read;\n - GIT_ENOTFOUND if the object is not in the database." + "comment": " 0 if the object was read, GIT_ENOTFOUND if the object is\n not in the database." }, "description": "

Read an object from the database.

\n", "comments": "

This method queries all available ODB backends trying to read the given OID.

\n\n

The returned object is reference counted and internally cached, so it should be closed by the user once it's no longer in use.

\n", "group": "odb", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_odb_read-23" - ], - "general.c": [ - "ex/HEAD/general.html#git_odb_read-42" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_odb_read-21"], + "general.c": ["ex/v1.9.1/general.html#git_odb_read-42"] } }, "git_odb_read_prefix": { "type": "function", - "file": "odb.h", - "line": 129, - "lineto": 129, + "file": "git2/odb.h", + "line": 191, + "lineto": 191, "args": [ { - "name": "out", + "name": "obj", "type": "git_odb_object **", "comment": "pointer where to store the read object" }, @@ -10904,21 +11361,21 @@ "comment": "the length of the prefix" } ], - "argline": "git_odb_object **out, git_odb *db, const git_oid *short_id, size_t len", + "argline": "git_odb_object **obj, git_odb *db, const git_oid *short_id, size_t len", "sig": "git_odb_object **::git_odb *::const git_oid *::size_t", "return": { "type": "int", - "comment": " - 0 if the object was read;\n - GIT_ENOTFOUND if the object is not in the database.\n - GIT_EAMBIGUOUS if the prefix is ambiguous (several objects match the prefix)" + "comment": " 0 if the object was read, GIT_ENOTFOUND if the object is not in the\n database. GIT_EAMBIGUOUS if the prefix is ambiguous\n (several objects match the prefix)" }, "description": "

Read an object from the database, given a prefix\n of its identifier.

\n", - "comments": "

This method queries all available ODB backends trying to match the 'len' first hexadecimal characters of the 'short_id'. The remaining (GIT_OID_HEXSZ-len)*4 bits of 'short_id' must be 0s. 'len' must be at least GIT_OID_MINPREFIXLEN, and the prefix must be long enough to identify a unique object in all the backends; the method will fail otherwise.

\n\n

The returned object is reference counted and internally cached, so it should be closed by the user once it's no longer in use.

\n", + "comments": "

This method queries all available ODB backends trying to match the 'len' first hexadecimal characters of the 'short_id'. The remaining (GIT_OID_SHA1_HEXSIZE-len)*4 bits of 'short_id' must be 0s. 'len' must be at least GIT_OID_MINPREFIXLEN, and the prefix must be long enough to identify a unique object in all the backends; the method will fail otherwise.

\n\n

The returned object is reference counted and internally cached, so it should be closed by the user once it's no longer in use.

\n", "group": "odb" }, "git_odb_read_header": { "type": "function", - "file": "odb.h", - "line": 149, - "lineto": 149, + "file": "git2/odb.h", + "line": 210, + "lineto": 210, "args": [ { "name": "len_out", @@ -10927,7 +11384,7 @@ }, { "name": "type_out", - "type": "git_otype *", + "type": "git_object_t *", "comment": "pointer where to store the type" }, { @@ -10941,11 +11398,11 @@ "comment": "identity of the object to read." } ], - "argline": "size_t *len_out, git_otype *type_out, git_odb *db, const git_oid *id", - "sig": "size_t *::git_otype *::git_odb *::const git_oid *", + "argline": "size_t *len_out, git_object_t *type_out, git_odb *db, const git_oid *id", + "sig": "size_t *::git_object_t *::git_odb *::const git_oid *", "return": { "type": "int", - "comment": " - 0 if the object was read;\n - GIT_ENOTFOUND if the object is not in the database." + "comment": " 0 if the object was read, GIT_ENOTFOUND if the object is not\n in the database." }, "description": "

Read the header of an object from the database, without\n reading its full contents.

\n", "comments": "

The header includes the length and the type of an object.

\n\n

Note that most backends do not support reading only the header of an object, so the whole object will be read and then the header will be returned.

\n", @@ -10953,9 +11410,9 @@ }, "git_odb_exists": { "type": "function", - "file": "odb.h", - "line": 160, - "lineto": 160, + "file": "git2/odb.h", + "line": 219, + "lineto": 219, "args": [ { "name": "db", @@ -10972,17 +11429,49 @@ "sig": "git_odb *::const git_oid *", "return": { "type": "int", - "comment": " - 1, if the object was found\n - 0, otherwise" + "comment": " 1 if the object was found, 0 otherwise" }, "description": "

Determine if the given object can be found in the object database.

\n", "comments": "", "group": "odb" }, + "git_odb_exists_ext": { + "type": "function", + "file": "git2/odb.h", + "line": 230, + "lineto": 230, + "args": [ + { + "name": "db", + "type": "git_odb *", + "comment": "database to be searched for the given object." + }, + { + "name": "id", + "type": "const git_oid *", + "comment": "the object to search for." + }, + { + "name": "flags", + "type": "unsigned int", + "comment": "flags affecting the lookup (see `git_odb_lookup_flags_t`)" + } + ], + "argline": "git_odb *db, const git_oid *id, unsigned int flags", + "sig": "git_odb *::const git_oid *::unsigned int", + "return": { + "type": "int", + "comment": " 1 if the object was found, 0 otherwise" + }, + "description": "

Determine if the given object can be found in the object database, with\n extended options.

\n", + "comments": "", + "group": "odb" + }, "git_odb_exists_prefix": { "type": "function", - "file": "odb.h", - "line": 173, - "lineto": 174, + "file": "git2/odb.h", + "line": 243, + "lineto": 244, "args": [ { "name": "out", @@ -11017,9 +11506,9 @@ }, "git_odb_expand_ids": { "type": "function", - "file": "odb.h", - "line": 215, - "lineto": 218, + "file": "git2/odb.h", + "line": 286, + "lineto": 289, "args": [ { "name": "db", @@ -11043,24 +11532,20 @@ "type": "int", "comment": " 0 on success or an error code on failure" }, - "description": "

Determine if one or more objects can be found in the object database\n by their abbreviated object ID and type. The given array will be\n updated in place: for each abbreviated ID that is unique in the\n database, and of the given type (if specified), the full object ID,\n object ID length (GIT_OID_HEXSZ) and type will be written back to\n the array. For IDs that are not found (or are ambiguous), the\n array entry will be zeroed.

\n", - "comments": "

Note that since this function operates on multiple objects, the underlying database will not be asked to be reloaded if an object is not found (which is unlike other object database operations.)

\n", + "description": "

Determine if one or more objects can be found in the object database\n by their abbreviated object ID and type.

\n", + "comments": "

The given array will be updated in place: for each abbreviated ID that is unique in the database, and of the given type (if specified), the full object ID, object ID length (GIT_OID_SHA1_HEXSIZE) and type will be written back to the array. For IDs that are not found (or are ambiguous), the array entry will be zeroed.

\n\n

Note that since this function operates on multiple objects, the underlying database will not be asked to be reloaded if an object is not found (which is unlike other object database operations.)

\n", "group": "odb" }, "git_odb_refresh": { "type": "function", - "file": "odb.h", - "line": 238, - "lineto": 238, + "file": "git2/odb.h", + "line": 309, + "lineto": 309, "args": [ - { - "name": "db", - "type": "struct git_odb *", - "comment": "database to refresh" - } + { "name": "db", "type": "git_odb *", "comment": "database to refresh" } ], - "argline": "struct git_odb *db", - "sig": "struct git_odb *", + "argline": "git_odb *db", + "sig": "git_odb *", "return": { "type": "int", "comment": " 0 on success, error code otherwise" @@ -11071,15 +11556,11 @@ }, "git_odb_foreach": { "type": "function", - "file": "odb.h", - "line": 253, - "lineto": 253, + "file": "git2/odb.h", + "line": 324, + "lineto": 327, "args": [ - { - "name": "db", - "type": "git_odb *", - "comment": "database to use" - }, + { "name": "db", "type": "git_odb *", "comment": "database to use" }, { "name": "cb", "type": "git_odb_foreach_cb", @@ -11103,9 +11584,9 @@ }, "git_odb_write": { "type": "function", - "file": "odb.h", - "line": 273, - "lineto": 273, + "file": "git2/odb.h", + "line": 347, + "lineto": 347, "args": [ { "name": "out", @@ -11120,39 +11601,28 @@ { "name": "data", "type": "const void *", - "comment": "buffer with the data to store" - }, - { - "name": "len", - "type": "size_t", - "comment": "size of the buffer" + "comment": "`const unsigned char *` buffer with the data to store" }, + { "name": "len", "type": "size_t", "comment": "size of the buffer" }, { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "type of the data to store" } ], - "argline": "git_oid *out, git_odb *odb, const void *data, size_t len, git_otype type", - "sig": "git_oid *::git_odb *::const void *::size_t::git_otype", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_oid *out, git_odb *odb, const void *data, size_t len, git_object_t type", + "sig": "git_oid *::git_odb *::const void *::size_t::git_object_t", + "return": { "type": "int", "comment": " 0 
or an error code" }, "description": "

Write an object directly into the ODB

\n", "comments": "

This method writes a full object straight into the ODB. For most cases, it is preferred to write objects through a write stream, which is both faster and less memory intensive, specially for big objects.

\n\n

This method is provided for compatibility with custom backends which are not able to support streaming writes

\n", "group": "odb", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_odb_write-43" - ] - } + "examples": { "general.c": ["ex/v1.9.1/general.html#git_odb_write-43"] } }, "git_odb_open_wstream": { "type": "function", - "file": "odb.h", - "line": 296, - "lineto": 296, + "file": "git2/odb.h", + "line": 370, + "lineto": 370, "args": [ { "name": "out", @@ -11166,17 +11636,17 @@ }, { "name": "size", - "type": "git_off_t", + "type": "git_object_size_t", "comment": "final size of the object that will be written" }, { "name": "type", - "type": "git_otype", + "type": "git_object_t", "comment": "type of the object that will be written" } ], - "argline": "git_odb_stream **out, git_odb *db, git_off_t size, git_otype type", - "sig": "git_odb_stream **::git_odb *::git_off_t::git_otype", + "argline": "git_odb_stream **out, git_odb *db, git_object_size_t size, git_object_t type", + "sig": "git_odb_stream **::git_odb *::git_object_size_t::git_object_t", "return": { "type": "int", "comment": " 0 if the stream was created; error code otherwise" @@ -11187,9 +11657,9 @@ }, "git_odb_stream_write": { "type": "function", - "file": "odb.h", - "line": 309, - "lineto": 309, + "file": "git2/odb.h", + "line": 383, + "lineto": 383, "args": [ { "name": "stream", @@ -11201,17 +11671,13 @@ "type": "const char *", "comment": "the data to write" }, - { - "name": "len", - "type": "size_t", - "comment": "the buffer's length" - } + { "name": "len", "type": "size_t", "comment": "the buffer's length" } ], "argline": "git_odb_stream *stream, const char *buffer, size_t len", "sig": "git_odb_stream *::const char *::size_t", "return": { "type": "int", - "comment": " 0 if the write succeeded; error code otherwise" + "comment": " 0 if the write succeeded, error code otherwise" }, "description": "

Write to an odb stream

\n", "comments": "

This method will fail if the total number of received bytes exceeds the size declared with git_odb_open_wstream()

\n", @@ -11219,9 +11685,9 @@ }, "git_odb_stream_finalize_write": { "type": "function", - "file": "odb.h", - "line": 324, - "lineto": 324, + "file": "git2/odb.h", + "line": 398, + "lineto": 398, "args": [ { "name": "out", @@ -11238,7 +11704,7 @@ "sig": "git_oid *::git_odb_stream *", "return": { "type": "int", - "comment": " 0 on success; an error code otherwise" + "comment": " 0 on success, an error code otherwise" }, "description": "

Finish writing to an odb stream

\n", "comments": "

The object will take its final name and will be available to the odb.

\n\n

This method will fail if the total number of received bytes differs from the size declared with git_odb_open_wstream()

\n", @@ -11246,31 +11712,27 @@ }, "git_odb_stream_read": { "type": "function", - "file": "odb.h", - "line": 331, - "lineto": 331, + "file": "git2/odb.h", + "line": 410, + "lineto": 410, "args": [ { "name": "stream", "type": "git_odb_stream *", - "comment": null + "comment": "the stream" }, { "name": "buffer", "type": "char *", - "comment": null + "comment": "a user-allocated buffer to store the data in." }, - { - "name": "len", - "type": "size_t", - "comment": null - } + { "name": "len", "type": "size_t", "comment": "the buffer's length" } ], "argline": "git_odb_stream *stream, char *buffer, size_t len", "sig": "git_odb_stream *::char *::size_t", "return": { "type": "int", - "comment": null + "comment": " the number of bytes read if succeeded, error code otherwise" }, "description": "

Read from an odb stream

\n", "comments": "

Most backends don't implement streaming reads

\n", @@ -11278,9 +11740,9 @@ }, "git_odb_stream_free": { "type": "function", - "file": "odb.h", - "line": 338, - "lineto": 338, + "file": "git2/odb.h", + "line": 417, + "lineto": 417, "args": [ { "name": "stream", @@ -11290,19 +11752,16 @@ ], "argline": "git_odb_stream *stream", "sig": "git_odb_stream *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free an odb stream

\n", "comments": "", "group": "odb" }, "git_odb_open_rstream": { "type": "function", - "file": "odb.h", - "line": 366, - "lineto": 371, + "file": "git2/odb.h", + "line": 445, + "lineto": 450, "args": [ { "name": "out", @@ -11316,7 +11775,7 @@ }, { "name": "type", - "type": "git_otype *", + "type": "git_object_t *", "comment": "pointer where to store the type of the object" }, { @@ -11330,11 +11789,11 @@ "comment": "oid of the object the stream will read from" } ], - "argline": "git_odb_stream **out, size_t *len, git_otype *type, git_odb *db, const git_oid *oid", - "sig": "git_odb_stream **::size_t *::git_otype *::git_odb *::const git_oid *", + "argline": "git_odb_stream **out, size_t *len, git_object_t *type, git_odb *db, const git_oid *oid", + "sig": "git_odb_stream **::size_t *::git_object_t *::git_odb *::const git_oid *", "return": { "type": "int", - "comment": " 0 if the stream was created; error code otherwise" + "comment": " 0 if the stream was created, error code otherwise" }, "description": "

Open a stream to read an object from the ODB

\n", "comments": "

Note that most backends do not support streaming reads because they store their objects as compressed/delta'ed blobs.

\n\n

It's recommended to use git_odb_read instead, which is assured to work on all backends.

\n\n

The returned stream will be of type GIT_STREAM_RDONLY and will have the following methods:

\n\n
    - stream->read: read `n` bytes from the stream      - stream->free: free the stream\n
\n\n

The stream must always be free'd or will leak memory.

\n", @@ -11342,9 +11801,9 @@ }, "git_odb_write_pack": { "type": "function", - "file": "odb.h", - "line": 391, - "lineto": 395, + "file": "git2/odb.h", + "line": 471, + "lineto": 475, "args": [ { "name": "out", @@ -11358,7 +11817,7 @@ }, { "name": "progress_cb", - "type": "git_transfer_progress_cb", + "type": "git_indexer_progress_cb", "comment": "function to call with progress information.\n Be aware that this is called inline with network and indexing operations,\n so performance may be affected." }, { @@ -11367,61 +11826,66 @@ "comment": "payload for the progress callback" } ], - "argline": "git_odb_writepack **out, git_odb *db, git_transfer_progress_cb progress_cb, void *progress_payload", - "sig": "git_odb_writepack **::git_odb *::git_transfer_progress_cb::void *", - "return": { - "type": "int", - "comment": null - }, + "argline": "git_odb_writepack **out, git_odb *db, git_indexer_progress_cb progress_cb, void *progress_payload", + "sig": "git_odb_writepack **::git_odb *::git_indexer_progress_cb::void *", + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Open a stream for writing a pack file to the ODB.

\n", "comments": "

If the ODB layer understands pack files, then the given packfile will likely be streamed directly to disk (and a corresponding index created). If the ODB layer does not understand pack files, the objects will be stored in whatever format the ODB layer uses.

\n", "group": "odb" }, + "git_odb_write_multi_pack_index": { + "type": "function", + "file": "git2/odb.h", + "line": 489, + "lineto": 490, + "args": [ + { + "name": "db", + "type": "git_odb *", + "comment": "object database where the `multi-pack-index` file will be written." + } + ], + "argline": "git_odb *db", + "sig": "git_odb *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Write a multi-pack-index file from all the .pack files in the ODB.

\n", + "comments": "

If the ODB layer understands pack files, then this will create a file called multi-pack-index next to the .pack and .idx files, which will contain an index of all objects stored in .pack files. This will allow for O(log n) lookup for n objects (regardless of how many packfiles there exist).

\n", + "group": "odb" + }, "git_odb_hash": { "type": "function", - "file": "odb.h", - "line": 409, - "lineto": 409, + "file": "git2/odb.h", + "line": 539, + "lineto": 539, "args": [ { - "name": "out", + "name": "oid", "type": "git_oid *", "comment": "the resulting object-ID." }, + { "name": "data", "type": "const void *", "comment": "data to hash" }, + { "name": "len", "type": "size_t", "comment": "size of the data" }, { - "name": "data", - "type": "const void *", - "comment": "data to hash" - }, - { - "name": "len", - "type": "size_t", - "comment": "size of the data" - }, - { - "name": "type", - "type": "git_otype", + "name": "object_type", + "type": "git_object_t", "comment": "of the data to hash" } ], - "argline": "git_oid *out, const void *data, size_t len, git_otype type", - "sig": "git_oid *::const void *::size_t::git_otype", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Determine the object-ID (sha1 hash) of a data buffer

\n", - "comments": "

The resulting SHA-1 OID will be the identifier for the data buffer as if the data buffer it were to written to the ODB.

\n", + "argline": "git_oid *oid, const void *data, size_t len, git_object_t object_type", + "sig": "git_oid *::const void *::size_t::git_object_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Determine the object-ID (sha1 or sha256 hash) of a data buffer

\n", + "comments": "

The resulting OID will be the identifier for the data buffer as if the data buffer it were to written to the ODB.

\n", "group": "odb" }, "git_odb_hashfile": { "type": "function", - "file": "odb.h", - "line": 424, - "lineto": 424, + "file": "git2/odb.h", + "line": 554, + "lineto": 554, "args": [ { - "name": "out", + "name": "oid", "type": "git_oid *", "comment": "oid structure the result is written into." }, @@ -11431,26 +11895,23 @@ "comment": "file to read and determine object id for" }, { - "name": "type", - "type": "git_otype", - "comment": "the type of the object that will be hashed" + "name": "object_type", + "type": "git_object_t", + "comment": "of the data to hash" } ], - "argline": "git_oid *out, const char *path, git_otype type", - "sig": "git_oid *::const char *::git_otype", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_oid *oid, const char *path, git_object_t object_type", + "sig": "git_oid *::const char *::git_object_t", + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Read a file from disk and fill a git_oid with the object id\n that the file would have if it were written to the Object\n Database as an object of the given type (w/o applying filters).\n Similar functionality to git.git's git hash-object without\n the -w flag, however, with the --no-filters flag.\n If you need filters, see git_repository_hashfile.

\n", "comments": "", "group": "odb" }, "git_odb_object_dup": { "type": "function", - "file": "odb.h", - "line": 438, - "lineto": 438, + "file": "git2/odb.h", + "line": 570, + "lineto": 570, "args": [ { "name": "dest", @@ -11465,19 +11926,16 @@ ], "argline": "git_odb_object **dest, git_odb_object *source", "sig": "git_odb_object **::git_odb_object *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a copy of an odb_object

\n", "comments": "

The returned copy must be manually freed with git_odb_object_free. Note that because of an implementation detail, the returned copy will be the same pointer as source: the object is internally refcounted, so the copy still needs to be freed twice.

\n", "group": "odb" }, "git_odb_object_free": { "type": "function", - "file": "odb.h", - "line": 448, - "lineto": 448, + "file": "git2/odb.h", + "line": 580, + "lineto": 580, "args": [ { "name": "object", @@ -11487,27 +11945,20 @@ ], "argline": "git_odb_object *object", "sig": "git_odb_object *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an ODB object

\n", "comments": "

This method must always be called once a git_odb_object is no longer needed, otherwise memory will leak.

\n", "group": "odb", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_odb_object_free-24" - ], - "general.c": [ - "ex/HEAD/general.html#git_odb_object_free-44" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_odb_object_free-22"], + "general.c": ["ex/v1.9.1/general.html#git_odb_object_free-44"] } }, "git_odb_object_id": { "type": "function", - "file": "odb.h", - "line": 458, - "lineto": 458, + "file": "git2/odb.h", + "line": 590, + "lineto": 590, "args": [ { "name": "object", @@ -11527,9 +11978,9 @@ }, "git_odb_object_data": { "type": "function", - "file": "odb.h", - "line": 471, - "lineto": 471, + "file": "git2/odb.h", + "line": 603, + "lineto": 603, "args": [ { "name": "object", @@ -11541,22 +11992,20 @@ "sig": "git_odb_object *", "return": { "type": "const void *", - "comment": " a pointer to the data" + "comment": " \n\n `const unsigned char *` a pointer to the data" }, "description": "

Return the data of an ODB object

\n", "comments": "

This is the uncompressed, raw data as read from the ODB, without the leading header.

\n\n

This pointer is owned by the object and shall not be free'd.

\n", "group": "odb", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_odb_object_data-45" - ] + "general.c": ["ex/v1.9.1/general.html#git_odb_object_data-45"] } }, "git_odb_object_size": { "type": "function", - "file": "odb.h", - "line": 482, - "lineto": 482, + "file": "git2/odb.h", + "line": 614, + "lineto": 614, "args": [ { "name": "object", @@ -11566,27 +12015,20 @@ ], "argline": "git_odb_object *object", "sig": "git_odb_object *", - "return": { - "type": "size_t", - "comment": " the size" - }, + "return": { "type": "size_t", "comment": " the size" }, "description": "

Return the size of an ODB object

\n", "comments": "

This is the real size of the data buffer, not the actual size of the object.

\n", "group": "odb", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_odb_object_size-25" - ], - "general.c": [ - "ex/HEAD/general.html#git_odb_object_size-46" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_odb_object_size-23"], + "general.c": ["ex/v1.9.1/general.html#git_odb_object_size-46"] } }, "git_odb_object_type": { "type": "function", - "file": "odb.h", - "line": 490, - "lineto": 490, + "file": "git2/odb.h", + "line": 622, + "lineto": 622, "args": [ { "name": "object", @@ -11596,24 +12038,19 @@ ], "argline": "git_odb_object *object", "sig": "git_odb_object *", - "return": { - "type": "git_otype", - "comment": " the type" - }, + "return": { "type": "git_object_t", "comment": " the type" }, "description": "

Return the type of an ODB object

\n", "comments": "", "group": "odb", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_odb_object_type-47" - ] + "general.c": ["ex/v1.9.1/general.html#git_odb_object_type-47"] } }, "git_odb_add_backend": { "type": "function", - "file": "odb.h", - "line": 505, - "lineto": 505, + "file": "git2/odb.h", + "line": 637, + "lineto": 637, "args": [ { "name": "odb", @@ -11635,7 +12072,7 @@ "sig": "git_odb *::git_odb_backend *::int", "return": { "type": "int", - "comment": " 0 on success; error code otherwise" + "comment": " 0 on success, error code otherwise" }, "description": "

Add a custom backend to an existing Object DB

\n", "comments": "

The backends are checked in relative ordering, based on the value of the priority parameter.

\n\n

Read for more information.

\n", @@ -11643,9 +12080,9 @@ }, "git_odb_add_alternate": { "type": "function", - "file": "odb.h", - "line": 526, - "lineto": 526, + "file": "git2/odb.h", + "line": 658, + "lineto": 658, "args": [ { "name": "odb", @@ -11667,7 +12104,7 @@ "sig": "git_odb *::git_odb_backend *::int", "return": { "type": "int", - "comment": " 0 on success; error code otherwise" + "comment": " 0 on success, error code otherwise" }, "description": "

Add a custom backend to an existing Object DB; this\n backend will work as an alternate.

\n", "comments": "

Alternate backends are always checked for objects after all the main backends have been exhausted.

\n\n

The backends are checked in relative ordering, based on the value of the priority parameter.

\n\n

Writing is disabled on alternate backends.

\n\n

Read for more information.

\n", @@ -11675,15 +12112,11 @@ }, "git_odb_num_backends": { "type": "function", - "file": "odb.h", - "line": 534, - "lineto": 534, + "file": "git2/odb.h", + "line": 666, + "lineto": 666, "args": [ - { - "name": "odb", - "type": "git_odb *", - "comment": "object database" - } + { "name": "odb", "type": "git_odb *", "comment": "object database" } ], "argline": "git_odb *odb", "sig": "git_odb *", @@ -11697,20 +12130,16 @@ }, "git_odb_get_backend": { "type": "function", - "file": "odb.h", - "line": 544, - "lineto": 544, + "file": "git2/odb.h", + "line": 676, + "lineto": 676, "args": [ { "name": "out", "type": "git_odb_backend **", "comment": "output pointer to ODB backend at pos" }, - { - "name": "odb", - "type": "git_odb *", - "comment": "object database" - }, + { "name": "odb", "type": "git_odb *", "comment": "object database" }, { "name": "pos", "type": "size_t", @@ -11721,17 +12150,40 @@ "sig": "git_odb_backend **::git_odb *::size_t", "return": { "type": "int", - "comment": " 0 on success; GIT_ENOTFOUND if pos is invalid; other errors \n<\n 0" + "comment": " 0 on success, GIT_ENOTFOUND if pos is invalid, other errors \n<\n 0" }, "description": "

Lookup an ODB backend object by index

\n", "comments": "", "group": "odb" }, + "git_odb_set_commit_graph": { + "type": "function", + "file": "git2/odb.h", + "line": 691, + "lineto": 691, + "args": [ + { "name": "odb", "type": "git_odb *", "comment": "object database" }, + { + "name": "cgraph", + "type": "git_commit_graph *", + "comment": "the git commit-graph" + } + ], + "argline": "git_odb *odb, git_commit_graph *cgraph", + "sig": "git_odb *::git_commit_graph *", + "return": { + "type": "int", + "comment": " 0 on success; error code otherwise" + }, + "description": "

Set the git commit-graph for the ODB.

\n", + "comments": "

After a successful call, the ownership of the cgraph parameter will be transferred to libgit2, and the caller should not free it.

\n\n

The commit-graph can also be unset by explicitly passing NULL as the cgraph parameter.

\n", + "group": "odb" + }, "git_odb_backend_pack": { "type": "function", - "file": "odb_backend.h", - "line": 34, - "lineto": 34, + "file": "git2/odb_backend.h", + "line": 142, + "lineto": 144, "args": [ { "name": "out", @@ -11746,19 +12198,40 @@ ], "argline": "git_odb_backend **out, const char *objects_dir", "sig": "git_odb_backend **::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create a backend for the packfiles.

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a backend for a directory containing packfiles.

\n", "comments": "", "group": "odb" }, + "git_odb_backend_one_pack": { + "type": "function", + "file": "git2/odb_backend.h", + "line": 156, + "lineto": 158, + "args": [ + { + "name": "out", + "type": "git_odb_backend **", + "comment": "location to store the odb backend pointer" + }, + { + "name": "index_file", + "type": "const char *", + "comment": "path to the packfile's .idx file" + } + ], + "argline": "git_odb_backend **out, const char *index_file", + "sig": "git_odb_backend **::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a backend out of a single packfile

\n", + "comments": "

This can be useful for inspecting the contents of a single packfile.

\n", + "group": "odb" + }, "git_odb_backend_loose": { "type": "function", - "file": "odb_backend.h", - "line": 48, - "lineto": 54, + "file": "git2/odb_backend.h", + "line": 171, + "lineto": 177, "args": [ { "name": "out", @@ -11773,66 +12246,36 @@ { "name": "compression_level", "type": "int", - "comment": "zlib compression level to use" + "comment": "zlib compression level (0-9), or -1 for the default" }, { "name": "do_fsync", "type": "int", - "comment": "whether to do an fsync() after writing" + "comment": "if non-zero, perform an fsync on write" }, { "name": "dir_mode", "type": "unsigned int", - "comment": "permissions to use creating a directory or 0 for defaults" + "comment": "permission to use when creating directories, or 0 for default" }, { "name": "file_mode", "type": "unsigned int", - "comment": "permissions to use creating a file or 0 for defaults" + "comment": "permission to use when creating directories, or 0 for default" } ], "argline": "git_odb_backend **out, const char *objects_dir, int compression_level, int do_fsync, unsigned int dir_mode, unsigned int file_mode", "sig": "git_odb_backend **::const char *::int::int::unsigned int::unsigned int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a backend for loose objects

\n", "comments": "", "group": "odb" }, - "git_odb_backend_one_pack": { - "type": "function", - "file": "odb_backend.h", - "line": 67, - "lineto": 67, - "args": [ - { - "name": "out", - "type": "git_odb_backend **", - "comment": "location to store the odb backend pointer" - }, - { - "name": "index_file", - "type": "const char *", - "comment": "path to the packfile's .idx file" - } - ], - "argline": "git_odb_backend **out, const char *index_file", - "sig": "git_odb_backend **::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create a backend out of a single packfile

\n", - "comments": "

This can be useful for inspecting the contents of a single packfile.

\n", - "group": "odb" - }, "git_oid_fromstr": { "type": "function", - "file": "oid.h", - "line": 47, - "lineto": 47, + "file": "git2/oid.h", + "line": 137, + "lineto": 137, "args": [ { "name": "out", @@ -11842,39 +12285,41 @@ { "name": "str", "type": "const char *", - "comment": "input hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (40 bytes)." + "comment": "input hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (40 bytes for sha1,\n\t\t256 bytes for sha256)." } ], "argline": "git_oid *out, const char *str", "sig": "git_oid *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Parse a hex formatted object id into a git_oid.

\n", - "comments": "", + "comments": "

The appropriate number of bytes for the given object ID type will be read from the string - 40 bytes for SHA1, 64 bytes for SHA256. The given string need not be NUL terminated.

\n", "group": "oid", "examples": { "general.c": [ - "ex/HEAD/general.html#git_oid_fromstr-48", - "ex/HEAD/general.html#git_oid_fromstr-49", - "ex/HEAD/general.html#git_oid_fromstr-50", - "ex/HEAD/general.html#git_oid_fromstr-51", - "ex/HEAD/general.html#git_oid_fromstr-52", - "ex/HEAD/general.html#git_oid_fromstr-53", - "ex/HEAD/general.html#git_oid_fromstr-54", - "ex/HEAD/general.html#git_oid_fromstr-55" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_oid_fromstr-23" + "ex/v1.9.1/general.html#git_oid_fromstr-48", + "ex/v1.9.1/general.html#git_oid_fromstr-49", + "ex/v1.9.1/general.html#git_oid_fromstr-50", + "ex/v1.9.1/general.html#git_oid_fromstr-51", + "ex/v1.9.1/general.html#git_oid_fromstr-52", + "ex/v1.9.1/general.html#git_oid_fromstr-53", + "ex/v1.9.1/general.html#git_oid_fromstr-54", + "ex/v1.9.1/general.html#git_oid_fromstr-55", + "ex/v1.9.1/general.html#git_oid_fromstr-56", + "ex/v1.9.1/general.html#git_oid_fromstr-57", + "ex/v1.9.1/general.html#git_oid_fromstr-58", + "ex/v1.9.1/general.html#git_oid_fromstr-59", + "ex/v1.9.1/general.html#git_oid_fromstr-60", + "ex/v1.9.1/general.html#git_oid_fromstr-61", + "ex/v1.9.1/general.html#git_oid_fromstr-62", + "ex/v1.9.1/general.html#git_oid_fromstr-63" ] } }, "git_oid_fromstrp": { "type": "function", - "file": "oid.h", - "line": 56, - "lineto": 56, + "file": "git2/oid.h", + "line": 146, + "lineto": 146, "args": [ { "name": "out", @@ -11889,19 +12334,16 @@ ], "argline": "git_oid *out, const char *str", "sig": "git_oid *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Parse a hex formatted null-terminated string into a git_oid.

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Parse a hex formatted NUL-terminated string into a git_oid.

\n", "comments": "", "group": "oid" }, "git_oid_fromstrn": { "type": "function", - "file": "oid.h", - "line": 69, - "lineto": 69, + "file": "git2/oid.h", + "line": 159, + "lineto": 159, "args": [ { "name": "out", @@ -11921,19 +12363,16 @@ ], "argline": "git_oid *out, const char *str, size_t length", "sig": "git_oid *::const char *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Parse N characters of a hex formatted object id into a git_oid.

\n", "comments": "

If N is odd, the last byte's high nibble will be read in and the low nibble set to zero.

\n", "group": "oid" }, "git_oid_fromraw": { "type": "function", - "file": "oid.h", - "line": 77, - "lineto": 77, + "file": "git2/oid.h", + "line": 168, + "lineto": 168, "args": [ { "name": "out", @@ -11948,24 +12387,21 @@ ], "argline": "git_oid *out, const unsigned char *raw", "sig": "git_oid *::const unsigned char *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Copy an already raw oid into a git_oid structure.

\n", "comments": "", "group": "oid" }, "git_oid_fmt": { "type": "function", - "file": "oid.h", - "line": 89, - "lineto": 89, + "file": "git2/oid.h", + "line": 184, + "lineto": 184, "args": [ { "name": "out", "type": "char *", - "comment": "output hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (40 bytes). Only the\n\t\toid digits are written; a '\n\\\n0' terminator must be added\n\t\tby the caller if it is required." + "comment": "output hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (40 bytes for SHA1,\n\t\t64 bytes for SHA256). Only the oid digits are written;\n\t\ta '\n\\\n0' terminator must be added by the caller if it is\n\t\trequired." }, { "name": "id", @@ -11975,44 +12411,35 @@ ], "argline": "char *out, const git_oid *id", "sig": "char *::const git_oid *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Format a git_oid into a hex string.

\n", "comments": "", "group": "oid", "examples": { + "fetch.c": [ + "ex/v1.9.1/fetch.html#git_oid_fmt-1", + "ex/v1.9.1/fetch.html#git_oid_fmt-2" + ], "general.c": [ - "ex/HEAD/general.html#git_oid_fmt-56", - "ex/HEAD/general.html#git_oid_fmt-57", - "ex/HEAD/general.html#git_oid_fmt-58", - "ex/HEAD/general.html#git_oid_fmt-59", - "ex/HEAD/general.html#git_oid_fmt-60", - "ex/HEAD/general.html#git_oid_fmt-61" - ], - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_oid_fmt-1", - "ex/HEAD/network/fetch.html#git_oid_fmt-2" - ], - "network/index-pack.c": [ - "ex/HEAD/network/index-pack.html#git_oid_fmt-6" - ], - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_oid_fmt-1" - ] + "ex/v1.9.1/general.html#git_oid_fmt-64", + "ex/v1.9.1/general.html#git_oid_fmt-65", + "ex/v1.9.1/general.html#git_oid_fmt-66", + "ex/v1.9.1/general.html#git_oid_fmt-67", + "ex/v1.9.1/general.html#git_oid_fmt-68" + ], + "ls-remote.c": ["ex/v1.9.1/ls-remote.html#git_oid_fmt-1"] } }, "git_oid_nfmt": { "type": "function", - "file": "oid.h", - "line": 100, - "lineto": 100, + "file": "git2/oid.h", + "line": 196, + "lineto": 196, "args": [ { "name": "out", "type": "char *", - "comment": "output hex string; you say how many bytes to write.\n\t\tIf the number of bytes is > GIT_OID_HEXSZ, extra bytes\n\t\twill be zeroed; if not, a '\n\\\n0' terminator is NOT added." + "comment": "output hex string; you say how many bytes to write.\n\t\tIf the number of bytes is > GIT_OID_SHA1_HEXSIZE, extra bytes\n\t\twill be zeroed; if not, a '\n\\\n0' terminator is NOT added." }, { "name": "n", @@ -12027,24 +12454,21 @@ ], "argline": "char *out, size_t n, const git_oid *id", "sig": "char *::size_t::const git_oid *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Format a git_oid into a partial hex string.

\n", "comments": "", "group": "oid" }, "git_oid_pathfmt": { "type": "function", - "file": "oid.h", - "line": 115, - "lineto": 115, + "file": "git2/oid.h", + "line": 213, + "lineto": 213, "args": [ { "name": "out", "type": "char *", - "comment": "output hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (41 bytes). Only the\n\t\toid digits are written; a '\n\\\n0' terminator must be added\n\t\tby the caller if it is required." + "comment": "output hex string; must be pointing at the start of\n\t\tthe hex sequence and have at least the number of bytes\n\t\tneeded for an oid encoded in hex (41 bytes for SHA1,\n\t\t65 bytes for SHA256). Only the oid digits are written;\n\t\ta '\n\\\n0' terminator must be added by the caller if it\n\t\tis required." }, { "name": "id", @@ -12055,8 +12479,8 @@ "argline": "char *out, const git_oid *id", "sig": "char *::const git_oid *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success, non-zero callback return value, or error code" }, "description": "

Format a git_oid into a loose-object path string.

\n", "comments": "

The resulting string is "aa/...", where "aa" is the first two hex digits of the oid and "..." is the remaining 38 digits.

\n", @@ -12064,9 +12488,9 @@ }, "git_oid_tostr_s": { "type": "function", - "file": "oid.h", - "line": 128, - "lineto": 128, + "file": "git2/oid.h", + "line": 226, + "lineto": 226, "args": [ { "name": "oid", @@ -12078,23 +12502,23 @@ "sig": "const git_oid *", "return": { "type": "char *", - "comment": " the c-string" + "comment": " the c-string or NULL on failure" }, "description": "

Format a git_oid into a statically allocated c-string.

\n", "comments": "

The c-string is owned by the library and should not be freed by the user. If libgit2 is built with thread support, the string will be stored in TLS (i.e. one buffer per thread) to allow for concurrent calls of the function.

\n", "group": "oid", "examples": { "merge.c": [ - "ex/HEAD/merge.html#git_oid_tostr_s-24", - "ex/HEAD/merge.html#git_oid_tostr_s-25" + "ex/v1.9.1/merge.html#git_oid_tostr_s-19", + "ex/v1.9.1/merge.html#git_oid_tostr_s-20" ] } }, "git_oid_tostr": { "type": "function", - "file": "oid.h", - "line": 147, - "lineto": 147, + "file": "git2/oid.h", + "line": 247, + "lineto": 247, "args": [ { "name": "out", @@ -12119,37 +12543,37 @@ "comment": " the out buffer pointer, assuming no input parameter\n\t\t\terrors, otherwise a pointer to an empty string." }, "description": "

Format a git_oid into a buffer as a hex format c-string.

\n", - "comments": "

If the buffer is smaller than GIT_OID_HEXSZ+1, then the resulting oid c-string will be truncated to n-1 characters (but will still be NUL-byte terminated).

\n\n

If there are any input parameter errors (out == NULL, n == 0, oid == NULL), then a pointer to an empty string is returned, so that the return value can always be printed.

\n", + "comments": "

If the buffer is smaller than the size of a hex-formatted oid string plus an additional byte (GIT_OID_SHA_HEXSIZE + 1 for SHA1 or GIT_OID_SHA256_HEXSIZE + 1 for SHA256), then the resulting oid c-string will be truncated to n-1 characters (but will still be NUL-byte terminated).

\n\n

If there are any input parameter errors (out == NULL, n == 0, oid == NULL), then a pointer to an empty string is returned, so that the return value can always be printed.

\n", "group": "oid", "examples": { "blame.c": [ - "ex/HEAD/blame.html#git_oid_tostr-18", - "ex/HEAD/blame.html#git_oid_tostr-19" + "ex/v1.9.1/blame.html#git_oid_tostr-15", + "ex/v1.9.1/blame.html#git_oid_tostr-16" ], "cat-file.c": [ - "ex/HEAD/cat-file.html#git_oid_tostr-26", - "ex/HEAD/cat-file.html#git_oid_tostr-27", - "ex/HEAD/cat-file.html#git_oid_tostr-28", - "ex/HEAD/cat-file.html#git_oid_tostr-29", - "ex/HEAD/cat-file.html#git_oid_tostr-30" + "ex/v1.9.1/cat-file.html#git_oid_tostr-24", + "ex/v1.9.1/cat-file.html#git_oid_tostr-25", + "ex/v1.9.1/cat-file.html#git_oid_tostr-26", + "ex/v1.9.1/cat-file.html#git_oid_tostr-27", + "ex/v1.9.1/cat-file.html#git_oid_tostr-28" ], "log.c": [ - "ex/HEAD/log.html#git_oid_tostr-40", - "ex/HEAD/log.html#git_oid_tostr-41" + "ex/v1.9.1/log.html#git_oid_tostr-38", + "ex/v1.9.1/log.html#git_oid_tostr-39" ], "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_oid_tostr-12", - "ex/HEAD/rev-parse.html#git_oid_tostr-13", - "ex/HEAD/rev-parse.html#git_oid_tostr-14", - "ex/HEAD/rev-parse.html#git_oid_tostr-15" + "ex/v1.9.1/rev-parse.html#git_oid_tostr-10", + "ex/v1.9.1/rev-parse.html#git_oid_tostr-11", + "ex/v1.9.1/rev-parse.html#git_oid_tostr-12", + "ex/v1.9.1/rev-parse.html#git_oid_tostr-13" ] } }, "git_oid_cpy": { "type": "function", - "file": "oid.h", - "line": 155, - "lineto": 155, + "file": "git2/oid.h", + "line": 256, + "lineto": 256, "args": [ { "name": "out", @@ -12164,26 +12588,23 @@ ], "argline": "git_oid *out, const git_oid *src", "sig": "git_oid *::const git_oid *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or error code" }, "description": "

Copy an oid from one structure to another.

\n", "comments": "", "group": "oid", "examples": { "blame.c": [ - "ex/HEAD/blame.html#git_oid_cpy-20", - "ex/HEAD/blame.html#git_oid_cpy-21", - "ex/HEAD/blame.html#git_oid_cpy-22" + "ex/v1.9.1/blame.html#git_oid_cpy-17", + "ex/v1.9.1/blame.html#git_oid_cpy-18", + "ex/v1.9.1/blame.html#git_oid_cpy-19" ] } }, "git_oid_cmp": { "type": "function", - "file": "oid.h", - "line": 164, - "lineto": 164, + "file": "git2/oid.h", + "line": 265, + "lineto": 265, "args": [ { "name": "a", @@ -12208,9 +12629,9 @@ }, "git_oid_equal": { "type": "function", - "file": "oid.h", - "line": 173, - "lineto": 173, + "file": "git2/oid.h", + "line": 274, + "lineto": 274, "args": [ { "name": "a", @@ -12225,19 +12646,16 @@ ], "argline": "const git_oid *a, const git_oid *b", "sig": "const git_oid *::const git_oid *", - "return": { - "type": "int", - "comment": " true if equal, false otherwise" - }, + "return": { "type": "int", "comment": " true if equal, false otherwise" }, "description": "

Compare two oid structures for equality

\n", "comments": "", "group": "oid" }, "git_oid_ncmp": { "type": "function", - "file": "oid.h", - "line": 184, - "lineto": 184, + "file": "git2/oid.h", + "line": 285, + "lineto": 285, "args": [ { "name": "a", @@ -12257,19 +12675,16 @@ ], "argline": "const git_oid *a, const git_oid *b, size_t len", "sig": "const git_oid *::const git_oid *::size_t", - "return": { - "type": "int", - "comment": " 0 in case of a match" - }, + "return": { "type": "int", "comment": " 0 in case of a match" }, "description": "

Compare the first 'len' hexadecimal characters (packets of 4 bits)\n of two oid structures.

\n", "comments": "", "group": "oid" }, "git_oid_streq": { "type": "function", - "file": "oid.h", - "line": 193, - "lineto": 193, + "file": "git2/oid.h", + "line": 294, + "lineto": 294, "args": [ { "name": "id", @@ -12294,9 +12709,9 @@ }, "git_oid_strcmp": { "type": "function", - "file": "oid.h", - "line": 203, - "lineto": 203, + "file": "git2/oid.h", + "line": 304, + "lineto": 304, "args": [ { "name": "id", @@ -12319,41 +12734,34 @@ "comments": "", "group": "oid" }, - "git_oid_iszero": { + "git_oid_is_zero": { "type": "function", - "file": "oid.h", - "line": 210, - "lineto": 210, + "file": "git2/oid.h", + "line": 312, + "lineto": 312, "args": [ { "name": "id", "type": "const git_oid *", - "comment": null + "comment": "the object ID to check" } ], "argline": "const git_oid *id", "sig": "const git_oid *", - "return": { - "type": "int", - "comment": " 1 if all zeros, 0 otherwise." - }, + "return": { "type": "int", "comment": " 1 if all zeros, 0 otherwise." }, "description": "

Check is an oid is all zeros.

\n", "comments": "", "group": "oid", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_oid_iszero-23" - ], - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_oid_iszero-3" - ] + "blame.c": ["ex/v1.9.1/blame.html#git_oid_is_zero-20"], + "fetch.c": ["ex/v1.9.1/fetch.html#git_oid_is_zero-3"] } }, "git_oid_shorten_new": { "type": "function", - "file": "oid.h", - "line": 231, - "lineto": 231, + "file": "git2/oid.h", + "line": 333, + "lineto": 333, "args": [ { "name": "min_length", @@ -12373,9 +12781,9 @@ }, "git_oid_shorten_add": { "type": "function", - "file": "oid.h", - "line": 257, - "lineto": 257, + "file": "git2/oid.h", + "line": 359, + "lineto": 359, "args": [ { "name": "os", @@ -12395,14 +12803,14 @@ "comment": " the minimal length to uniquely identify all OIDs\n\t\tadded so far to the set; or an error code (\n<\n0) if an\n\t\terror occurs." }, "description": "

Add a new OID to set of shortened OIDs and calculate\n the minimal length to uniquely identify all the OIDs in\n the set.

\n", - "comments": "

The OID is expected to be a 40-char hexadecimal string. The OID is owned by the user and will not be modified or freed.

\n\n

For performance reasons, there is a hard-limit of how many OIDs can be added to a single set (around ~32000, assuming a mostly randomized distribution), which should be enough for any kind of program, and keeps the algorithm fast and memory-efficient.

\n\n

Attempting to add more than those OIDs will result in a GITERR_INVALID error

\n", + "comments": "

The OID is expected to be a 40-char hexadecimal string. The OID is owned by the user and will not be modified or freed.

\n\n

For performance reasons, there is a hard-limit of how many OIDs can be added to a single set (around ~32000, assuming a mostly randomized distribution), which should be enough for any kind of program, and keeps the algorithm fast and memory-efficient.

\n\n

Attempting to add more than those OIDs will result in a GIT_ERROR_INVALID error

\n", "group": "oid" }, "git_oid_shorten_free": { "type": "function", - "file": "oid.h", - "line": 264, - "lineto": 264, + "file": "git2/oid.h", + "line": 366, + "lineto": 366, "args": [ { "name": "os", @@ -12412,19 +12820,16 @@ ], "argline": "git_oid_shorten *os", "sig": "git_oid_shorten *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free an OID shortener instance

\n", "comments": "", "group": "oid" }, - "git_oidarray_free": { + "git_oidarray_dispose": { "type": "function", - "file": "oidarray.h", - "line": 34, - "lineto": 34, + "file": "git2/oidarray.h", + "line": 38, + "lineto": 38, "args": [ { "name": "array", @@ -12434,19 +12839,16 @@ ], "argline": "git_oidarray *array", "sig": "git_oidarray *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free the OID array

\n", - "comments": "

This method must (and must only) be called on git_oidarray objects where the array is allocated by the library. Not doing so, will result in a memory leak.

\n\n

This does not free the git_oidarray itself, since the library will never allocate that object directly itself (it is more commonly embedded inside another struct or created on the stack).

\n", + "return": { "type": "void", "comment": null }, + "description": "

Free the object IDs contained in an oid_array. This method should\n be called on git_oidarray objects that were provided by the\n library. Not doing so will result in a memory leak.

\n", + "comments": "

This does not free the git_oidarray itself, since the library will never allocate that object directly itself.

\n", "group": "oidarray" }, "git_packbuilder_new": { "type": "function", - "file": "pack.h", - "line": 64, - "lineto": 64, + "file": "git2/pack.h", + "line": 65, + "lineto": 65, "args": [ { "name": "out", @@ -12461,19 +12863,16 @@ ], "argline": "git_packbuilder **out, git_repository *repo", "sig": "git_packbuilder **::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Initialize a new packbuilder

\n", "comments": "", "group": "packbuilder" }, "git_packbuilder_set_threads": { "type": "function", - "file": "pack.h", - "line": 77, - "lineto": 77, + "file": "git2/pack.h", + "line": 78, + "lineto": 78, "args": [ { "name": "pb", @@ -12498,9 +12897,9 @@ }, "git_packbuilder_insert": { "type": "function", - "file": "pack.h", - "line": 91, - "lineto": 91, + "file": "git2/pack.h", + "line": 92, + "lineto": 92, "args": [ { "name": "pb", @@ -12520,19 +12919,16 @@ ], "argline": "git_packbuilder *pb, const git_oid *id, const char *name", "sig": "git_packbuilder *::const git_oid *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Insert a single object

\n", "comments": "

For an optimal pack it's mandatory to insert objects in recency order, commits followed by trees and blobs.

\n", "group": "packbuilder" }, "git_packbuilder_insert_tree": { "type": "function", - "file": "pack.h", - "line": 103, - "lineto": 103, + "file": "git2/pack.h", + "line": 104, + "lineto": 104, "args": [ { "name": "pb", @@ -12547,19 +12943,16 @@ ], "argline": "git_packbuilder *pb, const git_oid *id", "sig": "git_packbuilder *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Insert a root tree object

\n", "comments": "

This will add the tree as well as all referenced trees and blobs.

\n", "group": "packbuilder" }, "git_packbuilder_insert_commit": { "type": "function", - "file": "pack.h", - "line": 115, - "lineto": 115, + "file": "git2/pack.h", + "line": 116, + "lineto": 116, "args": [ { "name": "pb", @@ -12574,19 +12967,16 @@ ], "argline": "git_packbuilder *pb, const git_oid *id", "sig": "git_packbuilder *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Insert a commit object

\n", "comments": "

This will add a commit as well as the completed referenced tree.

\n", "group": "packbuilder" }, "git_packbuilder_insert_walk": { "type": "function", - "file": "pack.h", - "line": 128, - "lineto": 128, + "file": "git2/pack.h", + "line": 129, + "lineto": 129, "args": [ { "name": "pb", @@ -12601,19 +12991,16 @@ ], "argline": "git_packbuilder *pb, git_revwalk *walk", "sig": "git_packbuilder *::git_revwalk *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Insert objects as given by the walk

\n", "comments": "

Those commits and all objects they reference will be inserted into the packbuilder.

\n", "group": "packbuilder" }, "git_packbuilder_insert_recur": { "type": "function", - "file": "pack.h", - "line": 140, - "lineto": 140, + "file": "git2/pack.h", + "line": 141, + "lineto": 141, "args": [ { "name": "pb", @@ -12633,19 +13020,40 @@ ], "argline": "git_packbuilder *pb, const git_oid *id, const char *name", "sig": "git_packbuilder *::const git_oid *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Recursively insert an object and its referenced objects

\n", "comments": "

Insert the object as well as any object it references.

\n", "group": "packbuilder" }, - "git_packbuilder_write": { + "git_packbuilder_write_buf": { "type": "function", - "file": "pack.h", - "line": 164, - "lineto": 169, + "file": "git2/pack.h", + "line": 153, + "lineto": 153, + "args": [ + { + "name": "buf", + "type": "git_buf *", + "comment": "Buffer where to write the packfile" + }, + { + "name": "pb", + "type": "git_packbuilder *", + "comment": "The packbuilder" + } + ], + "argline": "git_buf *buf, git_packbuilder *pb", + "sig": "git_buf *::git_packbuilder *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Write the contents of the packfile to an in-memory buffer

\n", + "comments": "

The contents of the buffer will become a valid packfile, even though there will be no attached index

\n", + "group": "packbuilder" + }, + "git_packbuilder_write": { + "type": "function", + "file": "git2/pack.h", + "line": 166, + "lineto": 171, "args": [ { "name": "pb", @@ -12655,7 +13063,7 @@ { "name": "path", "type": "const char *", - "comment": "to the directory where the packfile and index should be stored" + "comment": "Path to the directory where the packfile and index should be stored, or NULL for default location" }, { "name": "mode", @@ -12664,7 +13072,7 @@ }, { "name": "progress_cb", - "type": "git_transfer_progress_cb", + "type": "git_indexer_progress_cb", "comment": "function to call with progress information from the indexer (optional)" }, { @@ -12673,21 +13081,18 @@ "comment": "payload for the progress callback (optional)" } ], - "argline": "git_packbuilder *pb, const char *path, unsigned int mode, git_transfer_progress_cb progress_cb, void *progress_cb_payload", - "sig": "git_packbuilder *::const char *::unsigned int::git_transfer_progress_cb::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_packbuilder *pb, const char *path, unsigned int mode, git_indexer_progress_cb progress_cb, void *progress_cb_payload", + "sig": "git_packbuilder *::const char *::unsigned int::git_indexer_progress_cb::void *", + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Write the new pack and corresponding index file to path.

\n", "comments": "", "group": "packbuilder" }, "git_packbuilder_hash": { "type": "function", - "file": "pack.h", - "line": 179, - "lineto": 179, + "file": "git2/pack.h", + "line": 184, + "lineto": 184, "args": [ { "name": "pb", @@ -12697,19 +13102,38 @@ ], "argline": "git_packbuilder *pb", "sig": "git_packbuilder *", - "return": { - "type": "const git_oid *", - "comment": null - }, + "return": { "type": "const git_oid *", "comment": " 0 or an error code" }, "description": "

Get the packfile's hash

\n", "comments": "

A packfile's name is derived from the sorted hashing of all object names. This is only correct after the packfile has been written.

\n", "group": "packbuilder" }, + "git_packbuilder_name": { + "type": "function", + "file": "git2/pack.h", + "line": 196, + "lineto": 196, + "args": [ + { + "name": "pb", + "type": "git_packbuilder *", + "comment": "the packbuilder instance" + } + ], + "argline": "git_packbuilder *pb", + "sig": "git_packbuilder *", + "return": { + "type": "const char *", + "comment": " a NUL terminated string for the packfile name" + }, + "description": "

Get the unique name for the resulting packfile.

\n", + "comments": "

The packfile's name is derived from the packfile's content. This is only correct after the packfile has been written.

\n", + "group": "packbuilder" + }, "git_packbuilder_foreach": { "type": "function", - "file": "pack.h", - "line": 191, - "lineto": 191, + "file": "git2/pack.h", + "line": 218, + "lineto": 218, "args": [ { "name": "pb", @@ -12729,19 +13153,16 @@ ], "argline": "git_packbuilder *pb, git_packbuilder_foreach_cb cb, void *payload", "sig": "git_packbuilder *::git_packbuilder_foreach_cb::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create the new pack and pass each object to the callback

\n", "comments": "", "group": "packbuilder" }, "git_packbuilder_object_count": { "type": "function", - "file": "pack.h", - "line": 199, - "lineto": 199, + "file": "git2/pack.h", + "line": 226, + "lineto": 226, "args": [ { "name": "pb", @@ -12761,9 +13182,9 @@ }, "git_packbuilder_written": { "type": "function", - "file": "pack.h", - "line": 207, - "lineto": 207, + "file": "git2/pack.h", + "line": 234, + "lineto": 234, "args": [ { "name": "pb", @@ -12783,9 +13204,9 @@ }, "git_packbuilder_set_callbacks": { "type": "function", - "file": "pack.h", - "line": 226, - "lineto": 229, + "file": "git2/pack.h", + "line": 264, + "lineto": 267, "args": [ { "name": "pb", @@ -12795,7 +13216,7 @@ { "name": "progress_cb", "type": "git_packbuilder_progress", - "comment": "Function to call with progress information during\n pack building. Be aware that this is called inline with pack building\n operations, so performance may be affected." + "comment": "Function to call with progress information during\n pack building. Be aware that this is called inline with pack building\n operations, so performance may be affected.\n When progress_cb returns an error, the pack building process will be\n aborted and the error will be returned from the invoked function.\n `pb` must then be freed." }, { "name": "progress_cb_payload", @@ -12805,19 +13226,16 @@ ], "argline": "git_packbuilder *pb, git_packbuilder_progress progress_cb, void *progress_cb_payload", "sig": "git_packbuilder *::git_packbuilder_progress::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Set the callbacks for a packbuilder

\n", "comments": "", "group": "packbuilder" }, "git_packbuilder_free": { "type": "function", - "file": "pack.h", - "line": 236, - "lineto": 236, + "file": "git2/pack.h", + "line": 274, + "lineto": 274, "args": [ { "name": "pb", @@ -12827,35 +13245,42 @@ ], "argline": "git_packbuilder *pb", "sig": "git_packbuilder *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the packbuilder and all associated data

\n", "comments": "", "group": "packbuilder" }, + "git_patch_owner": { + "type": "function", + "file": "git2/patch.h", + "line": 37, + "lineto": 37, + "args": [ + { "name": "patch", "type": "const git_patch *", "comment": "the patch" } + ], + "argline": "const git_patch *patch", + "sig": "const git_patch *", + "return": { + "type": "git_repository *", + "comment": " a pointer to the repository" + }, + "description": "

Get the repository associated with this patch. May be NULL.

\n", + "comments": "", + "group": "patch" + }, "git_patch_from_diff": { "type": "function", - "file": "patch.h", - "line": 51, - "lineto": 52, + "file": "git2/patch.h", + "line": 59, + "lineto": 60, "args": [ { "name": "out", "type": "git_patch **", "comment": "Output parameter for the delta patch object" }, - { - "name": "diff", - "type": "git_diff *", - "comment": "Diff list object" - }, - { - "name": "idx", - "type": "size_t", - "comment": "Index into diff list" - } + { "name": "diff", "type": "git_diff *", "comment": "Diff list object" }, + { "name": "idx", "type": "size_t", "comment": "Index into diff list" } ], "argline": "git_patch **out, git_diff *diff, size_t idx", "sig": "git_patch **::git_diff *::size_t", @@ -12869,9 +13294,9 @@ }, "git_patch_from_blobs": { "type": "function", - "file": "patch.h", - "line": 70, - "lineto": 76, + "file": "git2/patch.h", + "line": 78, + "lineto": 84, "args": [ { "name": "out", @@ -12916,9 +13341,9 @@ }, "git_patch_from_blob_and_buffer": { "type": "function", - "file": "patch.h", - "line": 95, - "lineto": 102, + "file": "git2/patch.h", + "line": 103, + "lineto": 110, "args": [ { "name": "out", @@ -12968,9 +13393,9 @@ }, "git_patch_from_buffers": { "type": "function", - "file": "patch.h", - "line": 122, - "lineto": 130, + "file": "git2/patch.h", + "line": 130, + "lineto": 138, "args": [ { "name": "out", @@ -13021,47 +13446,48 @@ }, "description": "

Directly generate a patch from the difference between two buffers.

\n", "comments": "

This is just like git_diff_buffers() except it generates a patch object for the difference instead of directly making callbacks. You can use the standard git_patch accessor functions to read the patch data, and you must call git_patch_free() on the patch when done.

\n", - "group": "patch" + "group": "patch", + "examples": { + "diff.c": ["ex/v1.9.1/diff.html#git_patch_from_buffers-16"] + } }, "git_patch_free": { "type": "function", - "file": "patch.h", - "line": 135, - "lineto": 135, + "file": "git2/patch.h", + "line": 145, + "lineto": 145, "args": [ { "name": "patch", "type": "git_patch *", - "comment": null + "comment": "The patch to free." } ], "argline": "git_patch *patch", "sig": "git_patch *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a git_patch object.

\n", "comments": "", - "group": "patch" + "group": "patch", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_patch_free-17"] } }, "git_patch_get_delta": { "type": "function", - "file": "patch.h", - "line": 141, - "lineto": 141, + "file": "git2/patch.h", + "line": 154, + "lineto": 154, "args": [ { "name": "patch", "type": "const git_patch *", - "comment": null + "comment": "The patch in which to get the delta." } ], "argline": "const git_patch *patch", "sig": "const git_patch *", "return": { "type": "const git_diff_delta *", - "comment": null + "comment": " The delta associated with the patch." }, "description": "

Get the delta associated with a patch. This delta points to internal\n data and you do not have to release it when you are done with it.

\n", "comments": "", @@ -13069,21 +13495,21 @@ }, "git_patch_num_hunks": { "type": "function", - "file": "patch.h", - "line": 146, - "lineto": 146, + "file": "git2/patch.h", + "line": 162, + "lineto": 162, "args": [ { "name": "patch", "type": "const git_patch *", - "comment": null + "comment": "The patch in which to get the number of hunks." } ], "argline": "const git_patch *patch", "sig": "const git_patch *", "return": { "type": "size_t", - "comment": null + "comment": " The number of hunks of the patch." }, "description": "

Get the number of hunks in a patch

\n", "comments": "", @@ -13091,9 +13517,9 @@ }, "git_patch_line_stats": { "type": "function", - "file": "patch.h", - "line": 164, - "lineto": 168, + "file": "git2/patch.h", + "line": 180, + "lineto": 184, "args": [ { "name": "total_context", @@ -13118,19 +13544,16 @@ ], "argline": "size_t *total_context, size_t *total_additions, size_t *total_deletions, const git_patch *patch", "sig": "size_t *::size_t *::size_t *::const git_patch *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on error" - }, + "return": { "type": "int", "comment": " 0 on success, \n<\n0 on error" }, "description": "

Get line counts of each type in a patch.

\n", "comments": "

This helps imitate a diff --numstat type of output. For that purpose, you only need the total_additions and total_deletions values, but we include the total_context line count in case you want the total number of lines of diff output that will be generated.

\n\n

All outputs are optional. Pass NULL if you don't need a particular count.

\n", "group": "patch" }, "git_patch_get_hunk": { "type": "function", - "file": "patch.h", - "line": 183, - "lineto": 187, + "file": "git2/patch.h", + "line": 199, + "lineto": 203, "args": [ { "name": "out", @@ -13165,20 +13588,16 @@ }, "git_patch_num_lines_in_hunk": { "type": "function", - "file": "patch.h", - "line": 196, - "lineto": 198, + "file": "git2/patch.h", + "line": 212, + "lineto": 214, "args": [ { "name": "patch", "type": "const git_patch *", "comment": "The git_patch object" }, - { - "name": "hunk_idx", - "type": "size_t", - "comment": "Index of the hunk" - } + { "name": "hunk_idx", "type": "size_t", "comment": "Index of the hunk" } ], "argline": "const git_patch *patch, size_t hunk_idx", "sig": "const git_patch *::size_t", @@ -13192,9 +13611,9 @@ }, "git_patch_get_line_in_hunk": { "type": "function", - "file": "patch.h", - "line": 214, - "lineto": 218, + "file": "git2/patch.h", + "line": 230, + "lineto": 234, "args": [ { "name": "out", @@ -13229,9 +13648,9 @@ }, "git_patch_size": { "type": "function", - "file": "patch.h", - "line": 236, - "lineto": 240, + "file": "git2/patch.h", + "line": 252, + "lineto": 256, "args": [ { "name": "patch", @@ -13256,19 +13675,16 @@ ], "argline": "git_patch *patch, int include_context, int include_hunk_headers, int include_file_headers", "sig": "git_patch *::int::int::int", - "return": { - "type": "size_t", - "comment": " The number of bytes of data" - }, + "return": { "type": "size_t", "comment": " The number of bytes of data" }, "description": "

Look up size of patch diff data in bytes

\n", "comments": "

This returns the raw size of the patch data. This only includes the actual data from the lines of the diff, not the file or hunk headers.

\n\n

If you pass include_context as true (non-zero), this will be the size of all of the diff output; if you pass it as false (zero), this will only include the actual changed lines (as if context_lines was 0).

\n", "group": "patch" }, "git_patch_print": { "type": "function", - "file": "patch.h", - "line": 254, - "lineto": 257, + "file": "git2/patch.h", + "line": 270, + "lineto": 273, "args": [ { "name": "patch", @@ -13298,9 +13714,9 @@ }, "git_patch_to_buf": { "type": "function", - "file": "patch.h", - "line": 266, - "lineto": 268, + "file": "git2/patch.h", + "line": 282, + "lineto": 284, "args": [ { "name": "out", @@ -13321,13 +13737,14 @@ }, "description": "

Get the content of a patch as a single diff text.

\n", "comments": "", - "group": "patch" + "group": "patch", + "examples": { "diff.c": ["ex/v1.9.1/diff.html#git_patch_to_buf-18"] } }, "git_pathspec_new": { "type": "function", - "file": "pathspec.h", - "line": 82, - "lineto": 83, + "file": "git2/pathspec.h", + "line": 89, + "lineto": 90, "args": [ { "name": "out", @@ -13349,17 +13766,13 @@ "description": "

Compile a pathspec

\n", "comments": "", "group": "pathspec", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_pathspec_new-42" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_pathspec_new-40"] } }, "git_pathspec_free": { "type": "function", - "file": "pathspec.h", - "line": 90, - "lineto": 90, + "file": "git2/pathspec.h", + "line": 97, + "lineto": 97, "args": [ { "name": "ps", @@ -13369,24 +13782,17 @@ ], "argline": "git_pathspec *ps", "sig": "git_pathspec *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a pathspec

\n", "comments": "", "group": "pathspec", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_pathspec_free-43" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_pathspec_free-41"] } }, "git_pathspec_matches_path": { "type": "function", - "file": "pathspec.h", - "line": 105, - "lineto": 106, + "file": "git2/pathspec.h", + "line": 112, + "lineto": 113, "args": [ { "name": "ps", @@ -13416,9 +13822,9 @@ }, "git_pathspec_match_workdir": { "type": "function", - "file": "pathspec.h", - "line": 130, - "lineto": 134, + "file": "git2/pathspec.h", + "line": 137, + "lineto": 141, "args": [ { "name": "out", @@ -13453,9 +13859,9 @@ }, "git_pathspec_match_index": { "type": "function", - "file": "pathspec.h", - "line": 159, - "lineto": 163, + "file": "git2/pathspec.h", + "line": 166, + "lineto": 170, "args": [ { "name": "out", @@ -13490,9 +13896,9 @@ }, "git_pathspec_match_tree": { "type": "function", - "file": "pathspec.h", - "line": 183, - "lineto": 187, + "file": "git2/pathspec.h", + "line": 190, + "lineto": 194, "args": [ { "name": "out", @@ -13524,17 +13930,13 @@ "description": "

Match a pathspec against files in a tree.

\n", "comments": "

This matches the pathspec against the files in the given tree.

\n\n

If out is not NULL, this returns a git_pathspec_match_list. That contains the list of all matched filenames (unless you pass the GIT_PATHSPEC_FAILURES_ONLY flag) and may also contain the list of pathspecs with no match (if you used the GIT_PATHSPEC_FIND_FAILURES flag). You must call git_pathspec_match_list_free() on this object.

\n", "group": "pathspec", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_pathspec_match_tree-44" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_pathspec_match_tree-42"] } }, "git_pathspec_match_diff": { "type": "function", - "file": "pathspec.h", - "line": 207, - "lineto": 211, + "file": "git2/pathspec.h", + "line": 214, + "lineto": 218, "args": [ { "name": "out", @@ -13569,9 +13971,9 @@ }, "git_pathspec_match_list_free": { "type": "function", - "file": "pathspec.h", - "line": 218, - "lineto": 218, + "file": "git2/pathspec.h", + "line": 225, + "lineto": 225, "args": [ { "name": "m", @@ -13581,19 +13983,16 @@ ], "argline": "git_pathspec_match_list *m", "sig": "git_pathspec_match_list *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free memory associated with a git_pathspec_match_list

\n", "comments": "", "group": "pathspec" }, "git_pathspec_match_list_entrycount": { "type": "function", - "file": "pathspec.h", - "line": 226, - "lineto": 227, + "file": "git2/pathspec.h", + "line": 233, + "lineto": 234, "args": [ { "name": "m", @@ -13613,9 +14012,9 @@ }, "git_pathspec_match_list_entry": { "type": "function", - "file": "pathspec.h", - "line": 239, - "lineto": 240, + "file": "git2/pathspec.h", + "line": 246, + "lineto": 247, "args": [ { "name": "m", @@ -13640,9 +14039,9 @@ }, "git_pathspec_match_list_diff_entry": { "type": "function", - "file": "pathspec.h", - "line": 252, - "lineto": 253, + "file": "git2/pathspec.h", + "line": 259, + "lineto": 260, "args": [ { "name": "m", @@ -13667,9 +14066,9 @@ }, "git_pathspec_match_list_failed_entrycount": { "type": "function", - "file": "pathspec.h", - "line": 264, - "lineto": 265, + "file": "git2/pathspec.h", + "line": 271, + "lineto": 272, "args": [ { "name": "m", @@ -13689,9 +14088,9 @@ }, "git_pathspec_match_list_failed_entry": { "type": "function", - "file": "pathspec.h", - "line": 276, - "lineto": 277, + "file": "git2/pathspec.h", + "line": 283, + "lineto": 284, "args": [ { "name": "m", @@ -13714,48 +14113,48 @@ "comments": "

This will return NULL for positions out of range.

\n", "group": "pathspec" }, - "git_proxy_init_options": { + "git_proxy_options_init": { "type": "function", - "file": "proxy.h", - "line": 88, - "lineto": 88, + "file": "git2/proxy.h", + "line": 103, + "lineto": 103, "args": [ { "name": "opts", "type": "git_proxy_options *", - "comment": "the options struct to initialize" + "comment": "The `git_proxy_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "the version of the struct, use `GIT_PROXY_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_PROXY_OPTIONS_VERSION`." } ], "argline": "git_proxy_options *opts, unsigned int version", "sig": "git_proxy_options *::unsigned int", "return": { "type": "int", - "comment": null + "comment": " Zero on success; -1 on failure." }, - "description": "

Initialize a proxy options structure

\n", - "comments": "", + "description": "

Initialize git_proxy_options structure

\n", + "comments": "

Initializes a git_proxy_options with default values. Equivalent to creating an instance with GIT_PROXY_OPTIONS_INIT.

\n", "group": "proxy" }, - "git_rebase_init_options": { + "git_rebase_options_init": { "type": "function", - "file": "rebase.h", - "line": 156, - "lineto": 158, + "file": "git2/rebase.h", + "line": 201, + "lineto": 203, "args": [ { "name": "opts", "type": "git_rebase_options *", - "comment": "the `git_rebase_options` instance to initialize." + "comment": "The `git_rebase_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_REBASE_OPTIONS_VERSION` here." + "comment": "The struct version; pass `GIT_REBASE_OPTIONS_VERSION`." } ], "argline": "git_rebase_options *opts, unsigned int version", @@ -13764,15 +14163,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_rebase_options with default values. Equivalent to\n creating an instance with GIT_REBASE_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_rebase_options structure

\n", + "comments": "

Initializes a git_rebase_options with default values. Equivalent to creating an instance with GIT_REBASE_OPTIONS_INIT.

\n", "group": "rebase" }, "git_rebase_init": { "type": "function", - "file": "rebase.h", - "line": 177, - "lineto": 183, + "file": "git2/rebase.h", + "line": 222, + "lineto": 228, "args": [ { "name": "out", @@ -13817,9 +14216,9 @@ }, "git_rebase_open": { "type": "function", - "file": "rebase.h", - "line": 194, - "lineto": 197, + "file": "git2/rebase.h", + "line": 239, + "lineto": 242, "args": [ { "name": "out", @@ -13847,55 +14246,137 @@ "comments": "", "group": "rebase" }, - "git_rebase_operation_entrycount": { + "git_rebase_orig_head_name": { "type": "function", - "file": "rebase.h", - "line": 205, - "lineto": 205, + "file": "git2/rebase.h", + "line": 250, + "lineto": 250, "args": [ { "name": "rebase", "type": "git_rebase *", - "comment": "The in-progress rebase" + "comment": "The in-progress rebase." } ], "argline": "git_rebase *rebase", "sig": "git_rebase *", "return": { - "type": "size_t", - "comment": " The number of rebase operations in total" + "type": "const char *", + "comment": " The original `HEAD` ref name" }, - "description": "

Gets the count of rebase operations that are to be applied.

\n", + "description": "

Gets the original HEAD ref name for merge rebases.

\n", "comments": "", "group": "rebase" }, - "git_rebase_operation_current": { + "git_rebase_orig_head_id": { "type": "function", - "file": "rebase.h", - "line": 216, - "lineto": 216, + "file": "git2/rebase.h", + "line": 258, + "lineto": 258, "args": [ { "name": "rebase", "type": "git_rebase *", - "comment": "The in-progress rebase" + "comment": "The in-progress rebase." } ], "argline": "git_rebase *rebase", "sig": "git_rebase *", "return": { - "type": "size_t", - "comment": " The index of the rebase operation currently being applied." + "type": "const git_oid *", + "comment": " The original `HEAD` id" }, - "description": "

Gets the index of the rebase operation that is currently being applied.\n If the first operation has not yet been applied (because you have\n called init but not yet next) then this returns\n GIT_REBASE_NO_OPERATION.

\n", + "description": "

Gets the original HEAD id for merge rebases.

\n", "comments": "", "group": "rebase" }, - "git_rebase_operation_byindex": { + "git_rebase_onto_name": { "type": "function", - "file": "rebase.h", - "line": 225, - "lineto": 227, + "file": "git2/rebase.h", + "line": 266, + "lineto": 266, + "args": [ + { + "name": "rebase", + "type": "git_rebase *", + "comment": "The in-progress rebase." + } + ], + "argline": "git_rebase *rebase", + "sig": "git_rebase *", + "return": { "type": "const char *", "comment": " The `onto` ref name" }, + "description": "

Gets the onto ref name for merge rebases.

\n", + "comments": "", + "group": "rebase" + }, + "git_rebase_onto_id": { + "type": "function", + "file": "git2/rebase.h", + "line": 274, + "lineto": 274, + "args": [ + { + "name": "rebase", + "type": "git_rebase *", + "comment": "The in-progress rebase." + } + ], + "argline": "git_rebase *rebase", + "sig": "git_rebase *", + "return": { "type": "const git_oid *", "comment": " The `onto` id" }, + "description": "

Gets the onto id for merge rebases.

\n", + "comments": "", + "group": "rebase" + }, + "git_rebase_operation_entrycount": { + "type": "function", + "file": "git2/rebase.h", + "line": 282, + "lineto": 282, + "args": [ + { + "name": "rebase", + "type": "git_rebase *", + "comment": "The in-progress rebase" + } + ], + "argline": "git_rebase *rebase", + "sig": "git_rebase *", + "return": { + "type": "size_t", + "comment": " The number of rebase operations in total" + }, + "description": "

Gets the count of rebase operations that are to be applied.

\n", + "comments": "", + "group": "rebase" + }, + "git_rebase_operation_current": { + "type": "function", + "file": "git2/rebase.h", + "line": 293, + "lineto": 293, + "args": [ + { + "name": "rebase", + "type": "git_rebase *", + "comment": "The in-progress rebase" + } + ], + "argline": "git_rebase *rebase", + "sig": "git_rebase *", + "return": { + "type": "size_t", + "comment": " The index of the rebase operation currently being applied." + }, + "description": "

Gets the index of the rebase operation that is currently being applied.\n If the first operation has not yet been applied (because you have\n called init but not yet next) then this returns\n GIT_REBASE_NO_OPERATION.

\n", + "comments": "", + "group": "rebase" + }, + "git_rebase_operation_byindex": { + "type": "function", + "file": "git2/rebase.h", + "line": 302, + "lineto": 304, "args": [ { "name": "rebase", @@ -13920,9 +14401,9 @@ }, "git_rebase_next": { "type": "function", - "file": "rebase.h", - "line": 240, - "lineto": 242, + "file": "git2/rebase.h", + "line": 317, + "lineto": 319, "args": [ { "name": "operation", @@ -13947,36 +14428,33 @@ }, "git_rebase_inmemory_index": { "type": "function", - "file": "rebase.h", - "line": 255, - "lineto": 257, + "file": "git2/rebase.h", + "line": 336, + "lineto": 338, "args": [ { "name": "index", "type": "git_index **", - "comment": null + "comment": "The result index of the last operation." }, { "name": "rebase", "type": "git_rebase *", - "comment": null + "comment": "The in-progress rebase." } ], "argline": "git_index **index, git_rebase *rebase", "sig": "git_index **::git_rebase *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Gets the index produced by the last operation, which is the result\n of git_rebase_next and which will be committed by the next\n invocation of git_rebase_commit. This is useful for resolving\n conflicts in an in-memory rebase before committing them. You must\n call git_index_free when you are finished with this.

\n", "comments": "

This is only applicable for in-memory rebases; for rebases within a working directory, the changes were applied to the repository's index.

\n", "group": "rebase" }, "git_rebase_commit": { "type": "function", - "file": "rebase.h", - "line": 281, - "lineto": 287, + "file": "git2/rebase.h", + "line": 362, + "lineto": 368, "args": [ { "name": "id", @@ -14021,9 +14499,9 @@ }, "git_rebase_abort": { "type": "function", - "file": "rebase.h", - "line": 297, - "lineto": 297, + "file": "git2/rebase.h", + "line": 378, + "lineto": 378, "args": [ { "name": "rebase", @@ -14043,9 +14521,9 @@ }, "git_rebase_finish": { "type": "function", - "file": "rebase.h", - "line": 307, - "lineto": 309, + "file": "git2/rebase.h", + "line": 388, + "lineto": 390, "args": [ { "name": "rebase", @@ -14060,19 +14538,16 @@ ], "argline": "git_rebase *rebase, const git_signature *signature", "sig": "git_rebase *::const git_signature *", - "return": { - "type": "int", - "comment": " Zero on success; -1 on error" - }, + "return": { "type": "int", "comment": " Zero on success; -1 on error" }, "description": "

Finishes a rebase that is currently in progress once all patches have\n been applied.

\n", "comments": "", "group": "rebase" }, "git_rebase_free": { "type": "function", - "file": "rebase.h", - "line": 316, - "lineto": 316, + "file": "git2/rebase.h", + "line": 397, + "lineto": 397, "args": [ { "name": "rebase", @@ -14082,17 +14557,14 @@ ], "argline": "git_rebase *rebase", "sig": "git_rebase *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Frees the git_rebase object.

\n", "comments": "", "group": "rebase" }, "git_refdb_new": { "type": "function", - "file": "refdb.h", + "file": "git2/refdb.h", "line": 35, "lineto": 35, "args": [ @@ -14109,17 +14581,14 @@ ], "argline": "git_refdb **out, git_repository *repo", "sig": "git_refdb **::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a new reference database with no backends.

\n", "comments": "

Before the Ref DB can be used for read/writing, a custom database backend must be manually set using git_refdb_set_backend()

\n", "group": "refdb" }, "git_refdb_open": { "type": "function", - "file": "refdb.h", + "file": "git2/refdb.h", "line": 49, "lineto": 49, "args": [ @@ -14136,41 +14605,35 @@ ], "argline": "git_refdb **out, git_repository *repo", "sig": "git_refdb **::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a new reference database and automatically add\n the default backends:

\n", "comments": "\n", "group": "refdb" }, "git_refdb_compress": { "type": "function", - "file": "refdb.h", - "line": 56, - "lineto": 56, + "file": "git2/refdb.h", + "line": 59, + "lineto": 59, "args": [ { "name": "refdb", "type": "git_refdb *", - "comment": null + "comment": "The reference database to optimize." } ], "argline": "git_refdb *refdb", "sig": "git_refdb *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Suggests that the given refdb compress or optimize its references.\n This mechanism is implementation specific. For on-disk reference\n databases, for example, this may pack all loose references.

\n", "comments": "", "group": "refdb" }, "git_refdb_free": { "type": "function", - "file": "refdb.h", - "line": 63, - "lineto": 63, + "file": "git2/refdb.h", + "line": 66, + "lineto": 66, "args": [ { "name": "refdb", @@ -14180,17 +14643,14 @@ ], "argline": "git_refdb *refdb", "sig": "git_refdb *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open reference database.

\n", "comments": "", "group": "refdb" }, "git_reflog_read": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 38, "lineto": 38, "args": [ @@ -14202,7 +14662,7 @@ { "name": "repo", "type": "git_repository *", - "comment": "the repostiory" + "comment": "the repository" }, { "name": "name", @@ -14212,17 +14672,14 @@ ], "argline": "git_reflog **out, git_repository *repo, const char *name", "sig": "git_reflog **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Read the reflog for the given reference

\n", "comments": "

If there is no reflog file for the given reference yet, an empty reflog object will be returned.

\n\n

The reflog must be freed manually by using git_reflog_free().

\n", "group": "reflog" }, "git_reflog_write": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 47, "lineto": 47, "args": [ @@ -14234,17 +14691,14 @@ ], "argline": "git_reflog *reflog", "sig": "git_reflog *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Write an existing in-memory reflog object back to disk\n using an atomic file lock.

\n", "comments": "", "group": "reflog" }, "git_reflog_append": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 60, "lineto": 60, "args": [ @@ -14271,17 +14725,14 @@ ], "argline": "git_reflog *reflog, const git_oid *id, const git_signature *committer, const char *msg", "sig": "git_reflog *::const git_oid *::const git_signature *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Add a new entry to the in-memory reflog.

\n", "comments": "

msg is optional and can be NULL.

\n", "group": "reflog" }, "git_reflog_rename": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 75, "lineto": 75, "args": [ @@ -14313,7 +14764,7 @@ }, "git_reflog_delete": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 84, "lineto": 84, "args": [ @@ -14330,17 +14781,14 @@ ], "argline": "git_repository *repo, const char *name", "sig": "git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Delete the reflog for the given reference

\n", "comments": "", "group": "reflog" }, "git_reflog_entrycount": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 92, "lineto": 92, "args": [ @@ -14352,17 +14800,14 @@ ], "argline": "git_reflog *reflog", "sig": "git_reflog *", - "return": { - "type": "size_t", - "comment": " the number of log entries" - }, + "return": { "type": "size_t", "comment": " the number of log entries" }, "description": "

Get the number of log entries in a reflog

\n", "comments": "", "group": "reflog" }, "git_reflog_entry_byindex": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 105, "lineto": 105, "args": [ @@ -14389,7 +14834,7 @@ }, "git_reflog_drop": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 124, "lineto": 127, "args": [ @@ -14421,7 +14866,7 @@ }, "git_reflog_entry_id_old": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 135, "lineto": 135, "args": [ @@ -14433,17 +14878,14 @@ ], "argline": "const git_reflog_entry *entry", "sig": "const git_reflog_entry *", - "return": { - "type": "const git_oid *", - "comment": " the old oid" - }, + "return": { "type": "const git_oid *", "comment": " the old oid" }, "description": "

Get the old oid

\n", "comments": "", "group": "reflog" }, "git_reflog_entry_id_new": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 143, "lineto": 143, "args": [ @@ -14465,7 +14907,7 @@ }, "git_reflog_entry_committer": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 151, "lineto": 151, "args": [ @@ -14487,7 +14929,7 @@ }, "git_reflog_entry_message": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 159, "lineto": 159, "args": [ @@ -14499,17 +14941,14 @@ ], "argline": "const git_reflog_entry *entry", "sig": "const git_reflog_entry *", - "return": { - "type": "const char *", - "comment": " the log msg" - }, + "return": { "type": "const char *", "comment": " the log msg" }, "description": "

Get the log message

\n", "comments": "", "group": "reflog" }, "git_reflog_free": { "type": "function", - "file": "reflog.h", + "file": "git2/reflog.h", "line": 166, "lineto": 166, "args": [ @@ -14521,17 +14960,14 @@ ], "argline": "git_reflog *reflog", "sig": "git_reflog *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the reflog

\n", "comments": "", "group": "reflog" }, "git_reference_lookup": { "type": "function", - "file": "refs.h", + "file": "git2/refs.h", "line": 37, "lineto": 37, "args": [ @@ -14561,17 +14997,17 @@ "comments": "

The returned reference must be freed by the user.

\n\n

The name will be checked for validity. See git_reference_symbolic_create() for rules about valid names.

\n", "group": "reference", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_reference_lookup-62" + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_reference_lookup-15", + "ex/v1.9.1/checkout.html#git_reference_lookup-16" ], - "merge.c": [ - "ex/HEAD/merge.html#git_reference_lookup-26" - ] + "general.c": ["ex/v1.9.1/general.html#git_reference_lookup-69"], + "merge.c": ["ex/v1.9.1/merge.html#git_reference_lookup-21"] } }, "git_reference_name_to_id": { "type": "function", - "file": "refs.h", + "file": "git2/refs.h", "line": 54, "lineto": 55, "args": [ @@ -14603,7 +15039,7 @@ }, "git_reference_dwim": { "type": "function", - "file": "refs.h", + "file": "git2/refs.h", "line": 68, "lineto": 68, "args": [ @@ -14625,25 +15061,17 @@ ], "argline": "git_reference **out, git_repository *repo, const char *shorthand", "sig": "git_reference **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a reference by DWIMing its short name

\n", - "comments": "

Apply the git precendence rules to the given shorthand to determine which reference the user is referring to.

\n", + "comments": "

Apply the git precedence rules to the given shorthand to determine which reference the user is referring to.

\n", "group": "reference", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_reference_dwim-27", - "ex/HEAD/merge.html#git_reference_dwim-28" - ] - } + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_reference_dwim-22"] } }, "git_reference_symbolic_create_matching": { "type": "function", - "file": "refs.h", - "line": 109, - "lineto": 109, + "file": "git2/refs.h", + "line": 112, + "lineto": 112, "args": [ { "name": "out", @@ -14688,14 +15116,14 @@ "comment": " 0 on success, GIT_EEXISTS, GIT_EINVALIDSPEC, GIT_EMODIFIED or an error code" }, "description": "

Conditionally create a new symbolic reference.

\n", - "comments": "

A symbolic reference is a reference name that refers to another reference name. If the other name moves, the symbolic name will move, too. As a simple example, the "HEAD" reference might refer to "refs/heads/master" while on the "master" branch of a repository.

\n\n

The symbolic reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n\n

It will return GIT_EMODIFIED if the reference's value at the time of updating does not match the one passed through current_value (i.e. if the ref has changed since the user read it).

\n", + "comments": "

A symbolic reference is a reference name that refers to another reference name. If the other name moves, the symbolic name will move, too. As a simple example, the "HEAD" reference might refer to "refs/heads/master" while on the "master" branch of a repository.

\n\n

The symbolic reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n\n

It will return GIT_EMODIFIED if the reference's value at the time of updating does not match the one passed through current_value (i.e. if the ref has changed since the user read it).

\n\n

If current_value is all zeros, this function will return GIT_EMODIFIED if the ref already exists.

\n", "group": "reference" }, "git_reference_symbolic_create": { "type": "function", - "file": "refs.h", - "line": 145, - "lineto": 145, + "file": "git2/refs.h", + "line": 148, + "lineto": 148, "args": [ { "name": "out", @@ -14740,9 +15168,9 @@ }, "git_reference_create": { "type": "function", - "file": "refs.h", - "line": 182, - "lineto": 182, + "file": "git2/refs.h", + "line": 185, + "lineto": 185, "args": [ { "name": "out", @@ -14782,19 +15210,17 @@ "comment": " 0 on success, GIT_EEXISTS, GIT_EINVALIDSPEC or an error code" }, "description": "

Create a new direct reference.

\n", - "comments": "

A direct reference (also called an object id reference) refers directly to a specific object id (a.k.a. OID or SHA) in the repository. The id permanently refers to the object (although the reference itself can be moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977.

\n\n

The direct reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and and it does not have a reflog.

\n", + "comments": "

A direct reference (also called an object id reference) refers directly to a specific object id (a.k.a. OID or SHA) in the repository. The id permanently refers to the object (although the reference itself can be moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977.

\n\n

The direct reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n", "group": "reference", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_reference_create-29" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_reference_create-23"] } }, "git_reference_create_matching": { "type": "function", - "file": "refs.h", - "line": 225, - "lineto": 225, + "file": "git2/refs.h", + "line": 228, + "lineto": 228, "args": [ { "name": "out", @@ -14839,14 +15265,14 @@ "comment": " 0 on success, GIT_EMODIFIED if the value of the reference\n has changed, GIT_EEXISTS, GIT_EINVALIDSPEC or an error code" }, "description": "

Conditionally create new direct reference

\n", - "comments": "

A direct reference (also called an object id reference) refers directly to a specific object id (a.k.a. OID or SHA) in the repository. The id permanently refers to the object (although the reference itself can be moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977.

\n\n

The direct reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and and it does not have a reflog.

\n\n

It will return GIT_EMODIFIED if the reference's value at the time of updating does not match the one passed through current_id (i.e. if the ref has changed since the user read it).

\n", + "comments": "

A direct reference (also called an object id reference) refers directly to a specific object id (a.k.a. OID or SHA) in the repository. The id permanently refers to the object (although the reference itself can be moved). For example, in libgit2 the direct ref "refs/tags/v0.17.0" refers to OID 5b9fac39d8a76b9139667c26a63e6b3f204b3977.

\n\n

The direct reference will be created in the repository and written to the disk. The generated reference object must be freed by the user.

\n\n

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n\n

This function will return an error if a reference already exists with the given name unless force is true, in which case it will be overwritten.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n\n

It will return GIT_EMODIFIED if the reference's value at the time of updating does not match the one passed through current_id (i.e. if the ref has changed since the user read it).

\n", "group": "reference" }, "git_reference_target": { "type": "function", - "file": "refs.h", - "line": 240, - "lineto": 240, + "file": "git2/refs.h", + "line": 243, + "lineto": 243, "args": [ { "name": "ref", @@ -14864,16 +15290,14 @@ "comments": "

Only available if the reference is direct (i.e. an object id reference, not a symbolic one).

\n\n

To find the OID of a symbolic ref, call git_reference_resolve() and then this function (or maybe use git_reference_name_to_id() to directly resolve a reference name all the way through to an OID).

\n", "group": "reference", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_reference_target-63" - ] + "general.c": ["ex/v1.9.1/general.html#git_reference_target-70"] } }, "git_reference_target_peel": { "type": "function", - "file": "refs.h", - "line": 251, - "lineto": 251, + "file": "git2/refs.h", + "line": 254, + "lineto": 254, "args": [ { "name": "ref", @@ -14893,9 +15317,9 @@ }, "git_reference_symbolic_target": { "type": "function", - "file": "refs.h", - "line": 261, - "lineto": 261, + "file": "git2/refs.h", + "line": 264, + "lineto": 264, "args": [ { "name": "ref", @@ -14914,18 +15338,16 @@ "group": "reference", "examples": { "general.c": [ - "ex/HEAD/general.html#git_reference_symbolic_target-64" + "ex/v1.9.1/general.html#git_reference_symbolic_target-71" ], - "merge.c": [ - "ex/HEAD/merge.html#git_reference_symbolic_target-30" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_reference_symbolic_target-24"] } }, "git_reference_type": { "type": "function", - "file": "refs.h", - "line": 271, - "lineto": 271, + "file": "git2/refs.h", + "line": 274, + "lineto": 274, "args": [ { "name": "ref", @@ -14935,24 +15357,19 @@ ], "argline": "const git_reference *ref", "sig": "const git_reference *", - "return": { - "type": "git_ref_t", - "comment": " the type" - }, + "return": { "type": "git_reference_t", "comment": " the type" }, "description": "

Get the type of a reference.

\n", - "comments": "

Either direct (GIT_REF_OID) or symbolic (GIT_REF_SYMBOLIC)

\n", + "comments": "

Either direct (GIT_REFERENCE_DIRECT) or symbolic (GIT_REFERENCE_SYMBOLIC)

\n", "group": "reference", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_reference_type-65" - ] + "general.c": ["ex/v1.9.1/general.html#git_reference_type-72"] } }, "git_reference_name": { "type": "function", - "file": "refs.h", - "line": 281, - "lineto": 281, + "file": "git2/refs.h", + "line": 284, + "lineto": 284, "args": [ { "name": "ref", @@ -14970,16 +15387,15 @@ "comments": "

See git_reference_symbolic_create() for rules about valid names.

\n", "group": "reference", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_reference_name-31" - ] + "checkout.c": ["ex/v1.9.1/checkout.html#git_reference_name-17"], + "merge.c": ["ex/v1.9.1/merge.html#git_reference_name-25"] } }, "git_reference_resolve": { "type": "function", - "file": "refs.h", - "line": 299, - "lineto": 299, + "file": "git2/refs.h", + "line": 302, + "lineto": 302, "args": [ { "name": "out", @@ -14994,19 +15410,16 @@ ], "argline": "git_reference **out, const git_reference *ref", "sig": "git_reference **::const git_reference *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Resolve a symbolic reference to a direct reference.

\n", "comments": "

This method iteratively peels a symbolic reference until it resolves to a direct reference to an OID.

\n\n

The peeled reference is returned in the resolved_ref argument, and must be freed manually once it's no longer needed.

\n\n

If a direct reference is passed as an argument, a copy of that reference is returned. This copy must be manually freed too.

\n", "group": "reference" }, "git_reference_owner": { "type": "function", - "file": "refs.h", - "line": 307, - "lineto": 307, + "file": "git2/refs.h", + "line": 310, + "lineto": 310, "args": [ { "name": "ref", @@ -15026,9 +15439,9 @@ }, "git_reference_symbolic_set_target": { "type": "function", - "file": "refs.h", - "line": 329, - "lineto": 333, + "file": "git2/refs.h", + "line": 332, + "lineto": 336, "args": [ { "name": "out", @@ -15058,14 +15471,14 @@ "comment": " 0 on success, GIT_EINVALIDSPEC or an error code" }, "description": "

Create a new reference with the same name as the given reference but a\n different symbolic target. The reference must be a symbolic reference,\n otherwise this will fail.

\n", - "comments": "

The new reference will be written to disk, overwriting the given reference.

\n\n

The target name will be checked for validity. See git_reference_symbolic_create() for rules about valid names.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n", + "comments": "

The new reference will be written to disk, overwriting the given reference.

\n\n

The target name will be checked for validity. See git_reference_symbolic_create() for rules about valid names.

\n\n

The message for the reflog will be ignored if the reference does not belong in the standard set (HEAD, branches and remote-tracking branches) and it does not have a reflog.

\n", "group": "reference" }, "git_reference_set_target": { "type": "function", - "file": "refs.h", - "line": 349, - "lineto": 353, + "file": "git2/refs.h", + "line": 352, + "lineto": 356, "args": [ { "name": "out", @@ -15098,21 +15511,19 @@ "comments": "

The new reference will be written to disk, overwriting the given reference.

\n", "group": "reference", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_reference_set_target-32" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_reference_set_target-26"] } }, "git_reference_rename": { "type": "function", - "file": "refs.h", - "line": 378, - "lineto": 383, + "file": "git2/refs.h", + "line": 382, + "lineto": 387, "args": [ { "name": "new_ref", "type": "git_reference **", - "comment": null + "comment": "The new reference" }, { "name": "ref", @@ -15147,9 +15558,9 @@ }, "git_reference_delete": { "type": "function", - "file": "refs.h", - "line": 398, - "lineto": 398, + "file": "git2/refs.h", + "line": 402, + "lineto": 402, "args": [ { "name": "ref", @@ -15169,14 +15580,14 @@ }, "git_reference_remove": { "type": "function", - "file": "refs.h", - "line": 409, - "lineto": 409, + "file": "git2/refs.h", + "line": 414, + "lineto": 414, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "The repository to remove the reference from" }, { "name": "name", @@ -15186,19 +15597,16 @@ ], "argline": "git_repository *repo, const char *name", "sig": "git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Delete an existing reference by name

\n", "comments": "

This method removes the named reference from the repository without looking at its old value.

\n", "group": "reference" }, "git_reference_list": { "type": "function", - "file": "refs.h", - "line": 423, - "lineto": 423, + "file": "git2/refs.h", + "line": 428, + "lineto": 428, "args": [ { "name": "array", @@ -15213,24 +15621,19 @@ ], "argline": "git_strarray *array, git_repository *repo", "sig": "git_strarray *::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Fill a list with all the references that can be found in a repository.

\n", "comments": "

The string array will be filled with the names of all references; these values are owned by the user and should be free'd manually when no longer needed, using git_strarray_free().

\n", "group": "reference", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_reference_list-66" - ] + "general.c": ["ex/v1.9.1/general.html#git_reference_list-73"] } }, "git_reference_foreach": { "type": "function", - "file": "refs.h", - "line": 444, - "lineto": 447, + "file": "git2/refs.h", + "line": 468, + "lineto": 471, "args": [ { "name": "repo", @@ -15260,9 +15663,9 @@ }, "git_reference_foreach_name": { "type": "function", - "file": "refs.h", - "line": 462, - "lineto": 465, + "file": "git2/refs.h", + "line": 486, + "lineto": 489, "args": [ { "name": "repo", @@ -15292,9 +15695,9 @@ }, "git_reference_dup": { "type": "function", - "file": "refs.h", - "line": 476, - "lineto": 476, + "file": "git2/refs.h", + "line": 500, + "lineto": 500, "args": [ { "name": "dest", @@ -15309,55 +15712,46 @@ ], "argline": "git_reference **dest, git_reference *source", "sig": "git_reference **::git_reference *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a copy of an existing reference.

\n", "comments": "

Call git_reference_free to free the data.

\n", "group": "reference" }, "git_reference_free": { "type": "function", - "file": "refs.h", - "line": 483, - "lineto": 483, + "file": "git2/refs.h", + "line": 507, + "lineto": 507, "args": [ - { - "name": "ref", - "type": "git_reference *", - "comment": "git_reference" - } + { "name": "ref", "type": "git_reference *", "comment": "git_reference" } ], "argline": "git_reference *ref", "sig": "git_reference *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the given reference.

\n", "comments": "", "group": "reference", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_reference_free-67" + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_reference_free-18", + "ex/v1.9.1/checkout.html#git_reference_free-19", + "ex/v1.9.1/checkout.html#git_reference_free-20" ], + "commit.c": ["ex/v1.9.1/commit.html#git_reference_free-7"], + "general.c": ["ex/v1.9.1/general.html#git_reference_free-74"], "merge.c": [ - "ex/HEAD/merge.html#git_reference_free-33", - "ex/HEAD/merge.html#git_reference_free-34", - "ex/HEAD/merge.html#git_reference_free-35", - "ex/HEAD/merge.html#git_reference_free-36" + "ex/v1.9.1/merge.html#git_reference_free-27", + "ex/v1.9.1/merge.html#git_reference_free-28", + "ex/v1.9.1/merge.html#git_reference_free-29" ], - "status.c": [ - "ex/HEAD/status.html#git_reference_free-3" - ] + "status.c": ["ex/v1.9.1/status.html#git_reference_free-1"] } }, "git_reference_cmp": { "type": "function", - "file": "refs.h", - "line": 492, - "lineto": 494, + "file": "git2/refs.h", + "line": 516, + "lineto": 518, "args": [ { "name": "ref1", @@ -15382,9 +15776,9 @@ }, "git_reference_iterator_new": { "type": "function", - "file": "refs.h", - "line": 503, - "lineto": 505, + "file": "git2/refs.h", + "line": 527, + "lineto": 529, "args": [ { "name": "out", @@ -15399,19 +15793,16 @@ ], "argline": "git_reference_iterator **out, git_repository *repo", "sig": "git_reference_iterator **::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create an iterator for the repo's references

\n", "comments": "", "group": "reference" }, "git_reference_iterator_glob_new": { "type": "function", - "file": "refs.h", - "line": 516, - "lineto": 519, + "file": "git2/refs.h", + "line": 540, + "lineto": 543, "args": [ { "name": "out", @@ -15431,19 +15822,16 @@ ], "argline": "git_reference_iterator **out, git_repository *repo, const char *glob", "sig": "git_reference_iterator **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create an iterator for the repo's references that match the\n specified glob

\n", "comments": "", "group": "reference" }, "git_reference_next": { "type": "function", - "file": "refs.h", - "line": 528, - "lineto": 528, + "file": "git2/refs.h", + "line": 552, + "lineto": 552, "args": [ { "name": "out", @@ -15468,9 +15856,9 @@ }, "git_reference_next_name": { "type": "function", - "file": "refs.h", - "line": 541, - "lineto": 541, + "file": "git2/refs.h", + "line": 565, + "lineto": 565, "args": [ { "name": "out", @@ -15495,9 +15883,9 @@ }, "git_reference_iterator_free": { "type": "function", - "file": "refs.h", - "line": 548, - "lineto": 548, + "file": "git2/refs.h", + "line": 572, + "lineto": 572, "args": [ { "name": "iter", @@ -15507,19 +15895,16 @@ ], "argline": "git_reference_iterator *iter", "sig": "git_reference_iterator *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the iterator and its associated resources

\n", "comments": "", "group": "reference" }, "git_reference_foreach_glob": { "type": "function", - "file": "refs.h", - "line": 568, - "lineto": 572, + "file": "git2/refs.h", + "line": 592, + "lineto": 596, "args": [ { "name": "repo", @@ -15554,9 +15939,9 @@ }, "git_reference_has_log": { "type": "function", - "file": "refs.h", - "line": 582, - "lineto": 582, + "file": "git2/refs.h", + "line": 606, + "lineto": 606, "args": [ { "name": "repo", @@ -15581,9 +15966,9 @@ }, "git_reference_ensure_log": { "type": "function", - "file": "refs.h", - "line": 594, - "lineto": 594, + "file": "git2/refs.h", + "line": 618, + "lineto": 618, "args": [ { "name": "repo", @@ -15598,19 +15983,16 @@ ], "argline": "git_repository *repo, const char *refname", "sig": "git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code." - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Ensure there is a reflog for a particular reference.

\n", "comments": "

Make sure that successive updates to the reference will append to its log.

\n", "group": "reference" }, "git_reference_is_branch": { "type": "function", - "file": "refs.h", - "line": 604, - "lineto": 604, + "file": "git2/refs.h", + "line": 628, + "lineto": 628, "args": [ { "name": "ref", @@ -15630,9 +16012,9 @@ }, "git_reference_is_remote": { "type": "function", - "file": "refs.h", - "line": 614, - "lineto": 614, + "file": "git2/refs.h", + "line": 638, + "lineto": 638, "args": [ { "name": "ref", @@ -15648,13 +16030,16 @@ }, "description": "

Check if a reference is a remote tracking branch

\n", "comments": "", - "group": "reference" + "group": "reference", + "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_reference_is_remote-21"] + } }, "git_reference_is_tag": { "type": "function", - "file": "refs.h", - "line": 624, - "lineto": 624, + "file": "git2/refs.h", + "line": 648, + "lineto": 648, "args": [ { "name": "ref", @@ -15674,9 +16059,9 @@ }, "git_reference_is_note": { "type": "function", - "file": "refs.h", - "line": 634, - "lineto": 634, + "file": "git2/refs.h", + "line": 658, + "lineto": 658, "args": [ { "name": "ref", @@ -15696,9 +16081,9 @@ }, "git_reference_normalize_name": { "type": "function", - "file": "refs.h", - "line": 690, - "lineto": 694, + "file": "git2/refs.h", + "line": 714, + "lineto": 718, "args": [ { "name": "buffer_out", @@ -15718,7 +16103,7 @@ { "name": "flags", "type": "unsigned int", - "comment": "Flags to constrain name validation rules - see the\n GIT_REF_FORMAT constants above." + "comment": "Flags to constrain name validation rules - see the\n GIT_REFERENCE_FORMAT constants above." } ], "argline": "char *buffer_out, size_t buffer_size, const char *name, unsigned int flags", @@ -15733,9 +16118,9 @@ }, "git_reference_peel": { "type": "function", - "file": "refs.h", - "line": 711, - "lineto": 714, + "file": "git2/refs.h", + "line": 735, + "lineto": 738, "args": [ { "name": "out", @@ -15744,57 +16129,55 @@ }, { "name": "ref", - "type": "git_reference *", + "type": "const git_reference *", "comment": "The reference to be processed" }, { "name": "type", - "type": "git_otype", - "comment": "The type of the requested object (GIT_OBJ_COMMIT,\n GIT_OBJ_TAG, GIT_OBJ_TREE, GIT_OBJ_BLOB or GIT_OBJ_ANY)." + "type": "git_object_t", + "comment": "The type of the requested object (GIT_OBJECT_COMMIT,\n GIT_OBJECT_TAG, GIT_OBJECT_TREE, GIT_OBJECT_BLOB or GIT_OBJECT_ANY)." 
} ], - "argline": "git_object **out, git_reference *ref, git_otype type", - "sig": "git_object **::git_reference *::git_otype", + "argline": "git_object **out, const git_reference *ref, git_object_t type", + "sig": "git_object **::const git_reference *::git_object_t", "return": { "type": "int", "comment": " 0 on success, GIT_EAMBIGUOUS, GIT_ENOTFOUND or an error code" }, "description": "

Recursively peel reference until object of the specified type is found.

\n", - "comments": "

The retrieved peeled object is owned by the repository and should be closed with the git_object_free method.

\n\n

If you pass GIT_OBJ_ANY as the target type, then the object will be peeled until a non-tag object is met.

\n", + "comments": "

The retrieved peeled object is owned by the repository and should be closed with the git_object_free method.

\n\n

If you pass GIT_OBJECT_ANY as the target type, then the object will be peeled until a non-tag object is met.

\n", "group": "reference", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_reference_peel-37" - ] - } + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_reference_peel-30"] } }, - "git_reference_is_valid_name": { + "git_reference_name_is_valid": { "type": "function", - "file": "refs.h", - "line": 730, - "lineto": 730, + "file": "git2/refs.h", + "line": 755, + "lineto": 755, "args": [ + { + "name": "valid", + "type": "int *", + "comment": "output pointer to set with validity of given reference name" + }, { "name": "refname", "type": "const char *", "comment": "name to be checked." } ], - "argline": "const char *refname", - "sig": "const char *", - "return": { - "type": "int", - "comment": " 1 if the reference name is acceptable; 0 if it isn't" - }, + "argline": "int *valid, const char *refname", + "sig": "int *::const char *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Ensure the reference name is well-formed.

\n", "comments": "

Valid reference names must follow one of two patterns:

\n\n
    \n
  1. Top-level names must contain only capital letters and underscores, and must begin and end with a letter. (e.g. "HEAD", "ORIG_HEAD"). 2. Names prefixed with "refs/" can be almost anything. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.
  2. \n
\n", "group": "reference" }, "git_reference_shorthand": { "type": "function", - "file": "refs.h", - "line": 744, - "lineto": 744, + "file": "git2/refs.h", + "line": 769, + "lineto": 769, "args": [ { "name": "ref", @@ -15812,16 +16195,65 @@ "comments": "

This will transform the reference name into a name "human-readable" version. If no shortname is appropriate, it will return the full name.

\n\n

The memory is owned by the reference and must not be freed.

\n", "group": "reference", "examples": { - "status.c": [ - "ex/HEAD/status.html#git_reference_shorthand-4" - ] + "status.c": ["ex/v1.9.1/status.html#git_reference_shorthand-2"] } }, + "git_refspec_parse": { + "type": "function", + "file": "git2/refspec.h", + "line": 32, + "lineto": 32, + "args": [ + { + "name": "refspec", + "type": "git_refspec **", + "comment": "a pointer to hold the refspec handle" + }, + { + "name": "input", + "type": "const char *", + "comment": "the refspec string" + }, + { + "name": "is_fetch", + "type": "int", + "comment": "is this a refspec for a fetch" + } + ], + "argline": "git_refspec **refspec, const char *input, int is_fetch", + "sig": "git_refspec **::const char *::int", + "return": { + "type": "int", + "comment": " 0 if the refspec string could be parsed, -1 otherwise" + }, + "description": "

Parse a given refspec string

\n", + "comments": "", + "group": "refspec" + }, + "git_refspec_free": { + "type": "function", + "file": "git2/refspec.h", + "line": 39, + "lineto": 39, + "args": [ + { + "name": "refspec", + "type": "git_refspec *", + "comment": "the refspec object" + } + ], + "argline": "git_refspec *refspec", + "sig": "git_refspec *", + "return": { "type": "void", "comment": null }, + "description": "

Free a refspec object which has been created by git_refspec_parse

\n", + "comments": "", + "group": "refspec" + }, "git_refspec_src": { "type": "function", - "file": "refspec.h", - "line": 30, - "lineto": 30, + "file": "git2/refspec.h", + "line": 47, + "lineto": 47, "args": [ { "name": "refspec", @@ -15841,9 +16273,9 @@ }, "git_refspec_dst": { "type": "function", - "file": "refspec.h", - "line": 38, - "lineto": 38, + "file": "git2/refspec.h", + "line": 55, + "lineto": 55, "args": [ { "name": "refspec", @@ -15863,9 +16295,9 @@ }, "git_refspec_string": { "type": "function", - "file": "refspec.h", - "line": 46, - "lineto": 46, + "file": "git2/refspec.h", + "line": 63, + "lineto": 63, "args": [ { "name": "refspec", @@ -15877,7 +16309,7 @@ "sig": "const git_refspec *", "return": { "type": "const char *", - "comment": null + "comment": " the refspec's original string" }, "description": "

Get the refspec's string

\n", "comments": "", @@ -15885,9 +16317,9 @@ }, "git_refspec_force": { "type": "function", - "file": "refspec.h", - "line": 54, - "lineto": 54, + "file": "git2/refspec.h", + "line": 71, + "lineto": 71, "args": [ { "name": "refspec", @@ -15907,15 +16339,11 @@ }, "git_refspec_direction": { "type": "function", - "file": "refspec.h", - "line": 62, - "lineto": 62, + "file": "git2/refspec.h", + "line": 79, + "lineto": 79, "args": [ - { - "name": "spec", - "type": "const git_refspec *", - "comment": "refspec" - } + { "name": "spec", "type": "const git_refspec *", "comment": "refspec" } ], "argline": "const git_refspec *spec", "sig": "const git_refspec *", @@ -15927,11 +16355,38 @@ "comments": "", "group": "refspec" }, + "git_refspec_src_matches_negative": { + "type": "function", + "file": "git2/refspec.h", + "line": 88, + "lineto": 88, + "args": [ + { + "name": "refspec", + "type": "const git_refspec *", + "comment": "the refspec" + }, + { + "name": "refname", + "type": "const char *", + "comment": "the name of the reference to check" + } + ], + "argline": "const git_refspec *refspec, const char *refname", + "sig": "const git_refspec *::const char *", + "return": { + "type": "int", + "comment": " 1 if the refspec matches, 0 otherwise" + }, + "description": "

Check if a refspec's source descriptor matches a negative reference

\n", + "comments": "", + "group": "refspec" + }, "git_refspec_src_matches": { "type": "function", - "file": "refspec.h", - "line": 71, - "lineto": 71, + "file": "git2/refspec.h", + "line": 97, + "lineto": 97, "args": [ { "name": "refspec", @@ -15950,15 +16405,15 @@ "type": "int", "comment": " 1 if the refspec matches, 0 otherwise" }, - "description": "

Check if a refspec's source descriptor matches a reference

\n", + "description": "

Check if a refspec's source descriptor matches a reference

\n", "comments": "", "group": "refspec" }, "git_refspec_dst_matches": { "type": "function", - "file": "refspec.h", - "line": 80, - "lineto": 80, + "file": "git2/refspec.h", + "line": 106, + "lineto": 106, "args": [ { "name": "refspec", @@ -15983,9 +16438,9 @@ }, "git_refspec_transform": { "type": "function", - "file": "refspec.h", - "line": 90, - "lineto": 90, + "file": "git2/refspec.h", + "line": 116, + "lineto": 116, "args": [ { "name": "out", @@ -16005,19 +16460,16 @@ ], "argline": "git_buf *out, const git_refspec *spec, const char *name", "sig": "git_buf *::const git_refspec *::const char *", - "return": { - "type": "int", - "comment": " 0, GIT_EBUFS or another error" - }, + "return": { "type": "int", "comment": " 0, GIT_EBUFS or another error" }, "description": "

Transform a reference to its target following the refspec's rules

\n", "comments": "", "group": "refspec" }, "git_refspec_rtransform": { "type": "function", - "file": "refspec.h", - "line": 100, - "lineto": 100, + "file": "git2/refspec.h", + "line": 126, + "lineto": 126, "args": [ { "name": "out", @@ -16037,17 +16489,17 @@ ], "argline": "git_buf *out, const git_refspec *spec, const char *name", "sig": "git_buf *::const git_refspec *::const char *", - "return": { - "type": "int", - "comment": " 0, GIT_EBUFS or another error" - }, + "return": { "type": "int", "comment": " 0, GIT_EBUFS or another error" }, "description": "

Transform a target reference to its source reference following the refspec's rules

\n", "comments": "", - "group": "refspec" + "group": "refspec", + "examples": { + "fetch.c": ["ex/v1.9.1/fetch.html#git_refspec_rtransform-4"] + } }, "git_remote_create": { "type": "function", - "file": "remote.h", + "file": "git2/remote.h", "line": 38, "lineto": 42, "args": [ @@ -16066,11 +16518,7 @@ "type": "const char *", "comment": "the remote's name" }, - { - "name": "url", - "type": "const char *", - "comment": "the remote's url" - } + { "name": "url", "type": "const char *", "comment": "the remote's url" } ], "argline": "git_remote **out, git_repository *repo, const char *name, const char *url", "sig": "git_remote **::git_repository *::const char *::const char *", @@ -16081,17 +16529,72 @@ "description": "

Add a remote with the default fetch refspec to the repository's configuration.

\n", "comments": "", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_create-4" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_create-1"] } + }, + "git_remote_create_options_init": { + "type": "function", + "file": "git2/remote.h", + "line": 135, + "lineto": 137, + "args": [ + { + "name": "opts", + "type": "git_remote_create_options *", + "comment": "The `git_remote_create_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_REMOTE_CREATE_OPTIONS_VERSION`." + } + ], + "argline": "git_remote_create_options *opts, unsigned int version", + "sig": "git_remote_create_options *::unsigned int", + "return": { + "type": "int", + "comment": " Zero on success; -1 on failure." + }, + "description": "

Initialize git_remote_create_options structure

\n", + "comments": "

Initializes a git_remote_create_options with default values. Equivalent to creating an instance with GIT_REMOTE_CREATE_OPTIONS_INIT.

\n", + "group": "remote" + }, + "git_remote_create_with_opts": { + "type": "function", + "file": "git2/remote.h", + "line": 151, + "lineto": 154, + "args": [ + { + "name": "out", + "type": "git_remote **", + "comment": "the resulting remote" + }, + { + "name": "url", + "type": "const char *", + "comment": "the remote's url" + }, + { + "name": "opts", + "type": "const git_remote_create_options *", + "comment": "the remote creation options" + } + ], + "argline": "git_remote **out, const char *url, const git_remote_create_options *opts", + "sig": "git_remote **::const char *::const git_remote_create_options *", + "return": { + "type": "int", + "comment": " 0, GIT_EINVALIDSPEC, GIT_EEXISTS or an error code" + }, + "description": "

Create a remote, with options.

\n", + "comments": "

This function allows more fine-grained control over the remote creation.

\n\n

Passing NULL as the opts argument will result in a detached remote.

\n", + "group": "remote" }, "git_remote_create_with_fetchspec": { "type": "function", - "file": "remote.h", - "line": 55, - "lineto": 60, + "file": "git2/remote.h", + "line": 167, + "lineto": 172, "args": [ { "name": "out", @@ -16131,9 +16634,9 @@ }, "git_remote_create_anonymous": { "type": "function", - "file": "remote.h", - "line": 73, - "lineto": 76, + "file": "git2/remote.h", + "line": 185, + "lineto": 188, "args": [ { "name": "out", @@ -16153,27 +16656,22 @@ ], "argline": "git_remote **out, git_repository *repo, const char *url", "sig": "git_remote **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create an anonymous remote

\n", "comments": "

Create a remote with the given url in-memory. You can use this when you have a URL instead of a remote's name.

\n", "group": "remote", "examples": { - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_remote_create_anonymous-4" - ], - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_remote_create_anonymous-2" + "fetch.c": ["ex/v1.9.1/fetch.html#git_remote_create_anonymous-5"], + "ls-remote.c": [ + "ex/v1.9.1/ls-remote.html#git_remote_create_anonymous-2" ] } }, "git_remote_create_detached": { "type": "function", - "file": "remote.h", - "line": 92, - "lineto": 94, + "file": "git2/remote.h", + "line": 204, + "lineto": 206, "args": [ { "name": "out", @@ -16188,19 +16686,16 @@ ], "argline": "git_remote **out, const char *url", "sig": "git_remote **::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a remote without a connected local repo

\n", "comments": "

Create a remote with the given url in-memory. You can use this when you have a URL instead of a remote's name.

\n\n

Contrasted with git_remote_create_anonymous, a detached remote will not consider any repo configuration values (such as insteadof url substitutions).

\n", "group": "remote" }, "git_remote_lookup": { "type": "function", - "file": "remote.h", - "line": 107, - "lineto": 107, + "file": "git2/remote.h", + "line": 219, + "lineto": 219, "args": [ { "name": "out", @@ -16228,22 +16723,17 @@ "comments": "

The name will be checked for validity. See git_tag_create() for rules about valid names.

\n", "group": "remote", "examples": { - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_remote_lookup-5" - ], - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_remote_lookup-3" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_remote_lookup-5" - ] + "fetch.c": ["ex/v1.9.1/fetch.html#git_remote_lookup-6"], + "ls-remote.c": ["ex/v1.9.1/ls-remote.html#git_remote_lookup-3"], + "push.c": ["ex/v1.9.1/push.html#git_remote_lookup-1"], + "remote.c": ["ex/v1.9.1/remote.html#git_remote_lookup-2"] } }, "git_remote_dup": { "type": "function", - "file": "remote.h", - "line": 119, - "lineto": 119, + "file": "git2/remote.h", + "line": 231, + "lineto": 231, "args": [ { "name": "dest", @@ -16258,19 +16748,16 @@ ], "argline": "git_remote **dest, git_remote *source", "sig": "git_remote **::git_remote *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a copy of an existing remote. All internal strings are also\n duplicated. Callbacks are not duplicated.

\n", "comments": "

Call git_remote_free to free the data.

\n", "group": "remote" }, "git_remote_owner": { "type": "function", - "file": "remote.h", - "line": 127, - "lineto": 127, + "file": "git2/remote.h", + "line": 239, + "lineto": 239, "args": [ { "name": "remote", @@ -16290,9 +16777,9 @@ }, "git_remote_name": { "type": "function", - "file": "remote.h", - "line": 135, - "lineto": 135, + "file": "git2/remote.h", + "line": 247, + "lineto": 247, "args": [ { "name": "remote", @@ -16312,9 +16799,9 @@ }, "git_remote_url": { "type": "function", - "file": "remote.h", - "line": 146, - "lineto": 146, + "file": "git2/remote.h", + "line": 259, + "lineto": 259, "args": [ { "name": "remote", @@ -16324,24 +16811,17 @@ ], "argline": "const git_remote *remote", "sig": "const git_remote *", - "return": { - "type": "const char *", - "comment": " a pointer to the url" - }, + "return": { "type": "const char *", "comment": " a pointer to the url" }, "description": "

Get the remote's url

\n", - "comments": "

If url.*.insteadOf has been configured for this URL, it will return the modified URL.

\n", + "comments": "

If url.*.insteadOf has been configured for this URL, it will return the modified URL. This function does not consider if a push url has been configured for this remote (use git_remote_pushurl if needed).

\n", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_url-6" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_url-3"] } }, "git_remote_pushurl": { "type": "function", - "file": "remote.h", - "line": 157, - "lineto": 157, + "file": "git2/remote.h", + "line": 271, + "lineto": 271, "args": [ { "name": "remote", @@ -16355,20 +16835,16 @@ "type": "const char *", "comment": " a pointer to the url or NULL if no special url for pushing is set" }, - "description": "

Get the remote's url for pushing

\n", - "comments": "

If url.*.pushInsteadOf has been configured for this URL, it will return the modified URL.

\n", + "description": "

Get the remote's url for pushing.

\n", + "comments": "

If url.*.pushInsteadOf has been configured for this URL, it will return the modified URL. If git_remote_set_instance_pushurl has been called for this remote, then that URL will be returned.

\n", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_pushurl-7" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_pushurl-4"] } }, "git_remote_set_url": { "type": "function", - "file": "remote.h", - "line": 170, - "lineto": 170, + "file": "git2/remote.h", + "line": 284, + "lineto": 284, "args": [ { "name": "repo", @@ -16380,32 +16856,21 @@ "type": "const char *", "comment": "the remote's name" }, - { - "name": "url", - "type": "const char *", - "comment": "the url to set" - } + { "name": "url", "type": "const char *", "comment": "the url to set" } ], "argline": "git_repository *repo, const char *remote, const char *url", "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error value" - }, + "return": { "type": "int", "comment": " 0 or an error value" }, "description": "

Set the remote's url in the configuration

\n", "comments": "

Remote objects already in memory will not be affected. This assumes the common case of a single-url remote and will otherwise return an error.

\n", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_set_url-8" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_set_url-5"] } }, "git_remote_set_pushurl": { "type": "function", - "file": "remote.h", - "line": 183, - "lineto": 183, + "file": "git2/remote.h", + "line": 298, + "lineto": 298, "args": [ { "name": "repo", @@ -16417,32 +16882,63 @@ "type": "const char *", "comment": "the remote's name" }, - { - "name": "url", - "type": "const char *", - "comment": "the url to set" - } + { "name": "url", "type": "const char *", "comment": "the url to set" } ], "argline": "git_repository *repo, const char *remote, const char *url", "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Set the remote's url for pushing in the configuration.

\n", "comments": "

Remote objects already in memory will not be affected. This assumes the common case of a single-url remote and will otherwise return an error.

\n", "group": "remote", "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_set_pushurl-9" - ] + "remote.c": ["ex/v1.9.1/remote.html#git_remote_set_pushurl-6"] } }, + "git_remote_set_instance_url": { + "type": "function", + "file": "git2/remote.h", + "line": 308, + "lineto": 308, + "args": [ + { + "name": "remote", + "type": "git_remote *", + "comment": "the remote's name" + }, + { "name": "url", "type": "const char *", "comment": "the url to set" } + ], + "argline": "git_remote *remote, const char *url", + "sig": "git_remote *::const char *", + "return": { "type": "int", "comment": " 0 or an error value" }, + "description": "

Set the url for this particular url instance. The URL in the\n configuration will be ignored, and will not be changed.

\n", + "comments": "", + "group": "remote" + }, + "git_remote_set_instance_pushurl": { + "type": "function", + "file": "git2/remote.h", + "line": 318, + "lineto": 318, + "args": [ + { + "name": "remote", + "type": "git_remote *", + "comment": "the remote's name" + }, + { "name": "url", "type": "const char *", "comment": "the url to set" } + ], + "argline": "git_remote *remote, const char *url", + "sig": "git_remote *::const char *", + "return": { "type": "int", "comment": " 0 or an error value" }, + "description": "

Set the push url for this particular url instance. The URL in the\n configuration will be ignored, and will not be changed.

\n", + "comments": "", + "group": "remote" + }, "git_remote_add_fetch": { "type": "function", - "file": "remote.h", - "line": 196, - "lineto": 196, + "file": "git2/remote.h", + "line": 331, + "lineto": 331, "args": [ { "name": "repo", @@ -16472,9 +16968,9 @@ }, "git_remote_get_fetch_refspecs": { "type": "function", - "file": "remote.h", - "line": 207, - "lineto": 207, + "file": "git2/remote.h", + "line": 343, + "lineto": 343, "args": [ { "name": "array", @@ -16489,19 +16985,16 @@ ], "argline": "git_strarray *array, const git_remote *remote", "sig": "git_strarray *::const git_remote *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Get the remote's list of fetch refspecs

\n", "comments": "

The memory is owned by the user and should be freed with git_strarray_free.

\n", "group": "remote" }, "git_remote_add_push": { "type": "function", - "file": "remote.h", - "line": 220, - "lineto": 220, + "file": "git2/remote.h", + "line": 356, + "lineto": 356, "args": [ { "name": "repo", @@ -16531,9 +17024,9 @@ }, "git_remote_get_push_refspecs": { "type": "function", - "file": "remote.h", - "line": 231, - "lineto": 231, + "file": "git2/remote.h", + "line": 368, + "lineto": 368, "args": [ { "name": "array", @@ -16548,19 +17041,16 @@ ], "argline": "git_strarray *array, const git_remote *remote", "sig": "git_strarray *::const git_remote *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Get the remote's list of push refspecs

\n", "comments": "

The memory is owned by the user and should be freed with git_strarray_free.

\n", "group": "remote" }, "git_remote_refspec_count": { "type": "function", - "file": "remote.h", - "line": 239, - "lineto": 239, + "file": "git2/remote.h", + "line": 376, + "lineto": 376, "args": [ { "name": "remote", @@ -16580,20 +17070,16 @@ }, "git_remote_get_refspec": { "type": "function", - "file": "remote.h", - "line": 248, - "lineto": 248, + "file": "git2/remote.h", + "line": 385, + "lineto": 385, "args": [ { "name": "remote", "type": "const git_remote *", "comment": "the remote to query" }, - { - "name": "n", - "type": "size_t", - "comment": "the refspec to get" - } + { "name": "n", "type": "size_t", "comment": "the refspec to get" } ], "argline": "const git_remote *remote, size_t n", "sig": "const git_remote *::size_t", @@ -16605,58 +17091,11 @@ "comments": "", "group": "remote" }, - "git_remote_connect": { - "type": "function", - "file": "remote.h", - "line": 265, - "lineto": 265, - "args": [ - { - "name": "remote", - "type": "git_remote *", - "comment": "the remote to connect to" - }, - { - "name": "direction", - "type": "git_direction", - "comment": "GIT_DIRECTION_FETCH if you want to fetch or\n GIT_DIRECTION_PUSH if you want to push" - }, - { - "name": "callbacks", - "type": "const git_remote_callbacks *", - "comment": "the callbacks to use for this connection" - }, - { - "name": "proxy_opts", - "type": "const git_proxy_options *", - "comment": "proxy settings" - }, - { - "name": "custom_headers", - "type": "const git_strarray *", - "comment": "extra HTTP headers to use in this connection" - } - ], - "argline": "git_remote *remote, git_direction direction, const git_remote_callbacks *callbacks, const git_proxy_options *proxy_opts, const git_strarray *custom_headers", - "sig": "git_remote *::git_direction::const git_remote_callbacks *::const git_proxy_options *::const git_strarray *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Open a connection to a remote

\n", - "comments": "

The transport is selected based on the URL. The direction argument is due to a limitation of the git protocol (over TCP or SSH) which starts up a specific binary which can only do the one or the other.

\n", - "group": "remote", - "examples": { - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_remote_connect-4" - ] - } - }, "git_remote_ls": { "type": "function", - "file": "remote.h", - "line": 287, - "lineto": 287, + "file": "git2/remote.h", + "line": 407, + "lineto": 407, "args": [ { "name": "out", @@ -16668,32 +17107,23 @@ "type": "size_t *", "comment": "the number of remote heads" }, - { - "name": "remote", - "type": "git_remote *", - "comment": "the remote" - } + { "name": "remote", "type": "git_remote *", "comment": "the remote" } ], "argline": "const git_remote_head ***out, size_t *size, git_remote *remote", "sig": "const git_remote_head ***::size_t *::git_remote *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Get the remote repository's reference advertisement list

\n", "comments": "

Get the list of references with which the server responds to a new connection.

\n\n

The remote (or more exactly its transport) must have connected to the remote repository. This list is available as soon as the connection to the remote is initiated and it remains available after disconnecting.

\n\n

The memory belongs to the remote. The pointer will be valid as long as a new connection is not initiated, but it is recommended that you make a copy in order to make use of the data.

\n", "group": "remote", "examples": { - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_remote_ls-5" - ] + "ls-remote.c": ["ex/v1.9.1/ls-remote.html#git_remote_ls-4"] } }, "git_remote_connected": { "type": "function", - "file": "remote.h", - "line": 298, - "lineto": 298, + "file": "git2/remote.h", + "line": 418, + "lineto": 418, "args": [ { "name": "remote", @@ -16713,31 +17143,24 @@ }, "git_remote_stop": { "type": "function", - "file": "remote.h", - "line": 308, - "lineto": 308, + "file": "git2/remote.h", + "line": 429, + "lineto": 429, "args": [ - { - "name": "remote", - "type": "git_remote *", - "comment": "the remote" - } + { "name": "remote", "type": "git_remote *", "comment": "the remote" } ], "argline": "git_remote *remote", "sig": "git_remote *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Cancel the operation

\n", "comments": "

At certain points in its operation, the network code checks whether the operation has been cancelled and if so stops the operation.

\n", "group": "remote" }, "git_remote_disconnect": { "type": "function", - "file": "remote.h", - "line": 317, - "lineto": 317, + "file": "git2/remote.h", + "line": 439, + "lineto": 439, "args": [ { "name": "remote", @@ -16747,19 +17170,16 @@ ], "argline": "git_remote *remote", "sig": "git_remote *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Disconnect from the remote

\n", "comments": "

Close the connection to the remote.

\n", "group": "remote" }, "git_remote_free": { "type": "function", - "file": "remote.h", - "line": 327, - "lineto": 327, + "file": "git2/remote.h", + "line": 449, + "lineto": 449, "args": [ { "name": "remote", @@ -16769,31 +17189,24 @@ ], "argline": "git_remote *remote", "sig": "git_remote *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free the memory associated with a remote

\n", "comments": "

This also disconnects from the remote, if the connection has not been closed yet (using git_remote_disconnect).

\n", "group": "remote", "examples": { - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_remote_free-6", - "ex/HEAD/network/fetch.html#git_remote_free-7" - ], - "network/ls-remote.c": [ - "ex/HEAD/network/ls-remote.html#git_remote_free-6" + "fetch.c": [ + "ex/v1.9.1/fetch.html#git_remote_free-7", + "ex/v1.9.1/fetch.html#git_remote_free-8" ], - "remote.c": [ - "ex/HEAD/remote.html#git_remote_free-10" - ] + "ls-remote.c": ["ex/v1.9.1/ls-remote.html#git_remote_free-5"], + "remote.c": ["ex/v1.9.1/remote.html#git_remote_free-7"] } }, "git_remote_list": { "type": "function", - "file": "remote.h", - "line": 338, - "lineto": 338, + "file": "git2/remote.h", + "line": 460, + "lineto": 460, "args": [ { "name": "out", @@ -16808,24 +17221,20 @@ ], "argline": "git_strarray *out, git_repository *repo", "sig": "git_strarray *::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get a list of the configured remotes for a repo

\n", "comments": "

The string array must be freed by the user.

\n", "group": "remote", "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_list-11" - ] + "checkout.c": ["ex/v1.9.1/checkout.html#git_remote_list-22"], + "remote.c": ["ex/v1.9.1/remote.html#git_remote_list-8"] } }, "git_remote_init_callbacks": { "type": "function", - "file": "remote.h", - "line": 503, - "lineto": 505, + "file": "git2/remote.h", + "line": 714, + "lineto": 716, "args": [ { "name": "opts", @@ -16846,23 +17255,26 @@ }, "description": "

Initializes a git_remote_callbacks with default values. Equivalent to\n creating an instance with GIT_REMOTE_CALLBACKS_INIT.

\n", "comments": "", - "group": "remote" + "group": "remote", + "examples": { + "push.c": ["ex/v1.9.1/push.html#git_remote_init_callbacks-2"] + } }, - "git_fetch_init_options": { + "git_fetch_options_init": { "type": "function", - "file": "remote.h", - "line": 607, - "lineto": 609, + "file": "git2/remote.h", + "line": 852, + "lineto": 854, "args": [ { "name": "opts", "type": "git_fetch_options *", - "comment": "the `git_fetch_options` instance to initialize." + "comment": "The `git_fetch_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_FETCH_OPTIONS_VERSION` here." + "comment": "The struct version; pass `GIT_FETCH_OPTIONS_VERSION`." } ], "argline": "git_fetch_options *opts, unsigned int version", @@ -16871,25 +17283,25 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_fetch_options with default values. Equivalent to\n creating an instance with GIT_FETCH_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_fetch_options structure

\n", + "comments": "

Initializes a git_fetch_options with default values. Equivalent to creating an instance with GIT_FETCH_OPTIONS_INIT.

\n", "group": "fetch" }, - "git_push_init_options": { + "git_push_options_init": { "type": "function", - "file": "remote.h", - "line": 656, - "lineto": 658, + "file": "git2/remote.h", + "line": 917, + "lineto": 919, "args": [ { "name": "opts", "type": "git_push_options *", - "comment": "the `git_push_options` instance to initialize." + "comment": "The `git_push_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_PUSH_OPTIONS_VERSION` here." + "comment": "The struct version; pass `GIT_PUSH_OPTIONS_VERSION`." } ], "argline": "git_push_options *opts, unsigned int version", @@ -16898,21 +17310,116 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_push_options with default values. Equivalent to\n creating an instance with GIT_PUSH_OPTIONS_INIT.

\n", - "comments": "", - "group": "push" + "description": "

Initialize git_push_options structure

\n", + "comments": "

Initializes a git_push_options with default values. Equivalent to creating an instance with GIT_PUSH_OPTIONS_INIT.

\n", + "group": "push", + "examples": { "push.c": ["ex/v1.9.1/push.html#git_push_options_init-3"] } }, - "git_remote_download": { + "git_remote_connect_options_init": { "type": "function", - "file": "remote.h", - "line": 676, - "lineto": 676, + "file": "git2/remote.h", + "line": 968, + "lineto": 970, + "args": [ + { + "name": "opts", + "type": "git_remote_connect_options *", + "comment": "The `git_remote_connect_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_REMOTE_CONNECT_OPTIONS_VERSION`." + } + ], + "argline": "git_remote_connect_options *opts, unsigned int version", + "sig": "git_remote_connect_options *::unsigned int", + "return": { + "type": "int", + "comment": " Zero on success; -1 on failure." + }, + "description": "

Initialize git_remote_connect_options structure.

\n", + "comments": "

Initializes a git_remote_connect_options with default values. Equivalent to creating an instance with GIT_REMOTE_CONNECT_OPTIONS_INIT.

\n", + "group": "remote" + }, + "git_remote_connect": { + "type": "function", + "file": "git2/remote.h", + "line": 987, + "lineto": 992, "args": [ { "name": "remote", "type": "git_remote *", - "comment": "the remote" + "comment": "the remote to connect to" + }, + { + "name": "direction", + "type": "git_direction", + "comment": "GIT_DIRECTION_FETCH if you want to fetch or\n GIT_DIRECTION_PUSH if you want to push" }, + { + "name": "callbacks", + "type": "const git_remote_callbacks *", + "comment": "the callbacks to use for this connection" + }, + { + "name": "proxy_opts", + "type": "const git_proxy_options *", + "comment": "proxy settings" + }, + { + "name": "custom_headers", + "type": "const git_strarray *", + "comment": "extra HTTP headers to use in this connection" + } + ], + "argline": "git_remote *remote, git_direction direction, const git_remote_callbacks *callbacks, const git_proxy_options *proxy_opts, const git_strarray *custom_headers", + "sig": "git_remote *::git_direction::const git_remote_callbacks *::const git_proxy_options *::const git_strarray *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Open a connection to a remote.

\n", + "comments": "

The transport is selected based on the URL; the direction argument is due to a limitation of the git protocol which starts up a specific binary which can only do the one or the other.

\n", + "group": "remote", + "examples": { + "ls-remote.c": ["ex/v1.9.1/ls-remote.html#git_remote_connect-6"] + } + }, + "git_remote_connect_ext": { + "type": "function", + "file": "git2/remote.h", + "line": 1012, + "lineto": 1015, + "args": [ + { + "name": "remote", + "type": "git_remote *", + "comment": "the remote to connect to" + }, + { + "name": "direction", + "type": "git_direction", + "comment": "GIT_DIRECTION_FETCH if you want to fetch or\n GIT_DIRECTION_PUSH if you want to push" + }, + { + "name": "opts", + "type": "const git_remote_connect_options *", + "comment": "the remote connection options" + } + ], + "argline": "git_remote *remote, git_direction direction, const git_remote_connect_options *opts", + "sig": "git_remote *::git_direction::const git_remote_connect_options *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Open a connection to a remote with extended options.

\n", + "comments": "

The transport is selected based on the URL; the direction argument is due to a limitation of the git protocol which starts up a specific binary which can only do the one or the other.

\n\n

The given options structure will form the defaults for connection options and callback setup. Callers may override these defaults by specifying git_fetch_options or git_push_options in subsequent calls.

\n", + "group": "remote" + }, + "git_remote_download": { + "type": "function", + "file": "git2/remote.h", + "line": 1037, + "lineto": 1040, + "args": [ + { "name": "remote", "type": "git_remote *", "comment": "the remote" }, { "name": "refspecs", "type": "const git_strarray *", @@ -16921,30 +17428,23 @@ { "name": "opts", "type": "const git_fetch_options *", - "comment": "the options to use for this fetch" + "comment": "the options to use for this fetch or NULL" } ], "argline": "git_remote *remote, const git_strarray *refspecs, const git_fetch_options *opts", "sig": "git_remote *::const git_strarray *::const git_fetch_options *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Download and index the packfile

\n", - "comments": "

Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing, download and index the packfile.

\n\n

The .idx file will be created and both it and the packfile with be renamed to their final name.

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Download and index the packfile.

\n", + "comments": "

Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing, download and index the packfile.

\n\n

The .idx file will be created and both it and the packfile with be renamed to their final name.

\n\n

If options are specified and this remote is already connected then the existing remote connection options will be discarded and the remote will now use the new options.

\n", "group": "remote" }, "git_remote_upload": { "type": "function", - "file": "remote.h", - "line": 690, - "lineto": 690, + "file": "git2/remote.h", + "line": 1059, + "lineto": 1062, "args": [ - { - "name": "remote", - "type": "git_remote *", - "comment": "the remote" - }, + { "name": "remote", "type": "git_remote *", "comment": "the remote" }, { "name": "refspecs", "type": "const git_strarray *", @@ -16958,19 +17458,16 @@ ], "argline": "git_remote *remote, const git_strarray *refspecs, const git_push_options *opts", "sig": "git_remote *::const git_strarray *::const git_push_options *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a packfile and send it to the server

\n", - "comments": "

Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing, create a packfile with the missing objects and send it.

\n", + "comments": "

Connect to the remote if it hasn't been done yet, negotiate with the remote git which objects are missing, create a packfile with the missing objects and send it.

\n\n

If options are specified and this remote is already connected then the existing remote connection options will be discarded and the remote will now use the new options.

\n", "group": "remote" }, "git_remote_update_tips": { "type": "function", - "file": "remote.h", - "line": 706, - "lineto": 711, + "file": "git2/remote.h", + "line": 1081, + "lineto": 1086, "args": [ { "name": "remote", @@ -16980,12 +17477,12 @@ { "name": "callbacks", "type": "const git_remote_callbacks *", - "comment": "pointer to the callback structure to use" + "comment": "pointer to the callback structure to use or NULL" }, { - "name": "update_fetchhead", - "type": "int", - "comment": "whether to write to FETCH_HEAD. Pass 1 to behave like git." + "name": "update_flags", + "type": "unsigned int", + "comment": "the git_remote_update_flags for these tips." }, { "name": "download_tags", @@ -16998,21 +17495,18 @@ "comment": "The message to insert into the reflogs. If\n NULL and fetching, the default is \"fetch \n\", where \n is\n the name of the remote (or its url, for in-memory remotes). This\n parameter is ignored when pushing." } ], - "argline": "git_remote *remote, const git_remote_callbacks *callbacks, int update_fetchhead, git_remote_autotag_option_t download_tags, const char *reflog_message", - "sig": "git_remote *::const git_remote_callbacks *::int::git_remote_autotag_option_t::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Update the tips to the new state

\n", - "comments": "", + "argline": "git_remote *remote, const git_remote_callbacks *callbacks, unsigned int update_flags, git_remote_autotag_option_t download_tags, const char *reflog_message", + "sig": "git_remote *::const git_remote_callbacks *::unsigned int::git_remote_autotag_option_t::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Update the tips to the new state.

\n", + "comments": "

If callbacks are not specified then the callbacks specified to git_remote_connect will be used (if it was called).

\n", "group": "remote" }, "git_remote_fetch": { "type": "function", - "file": "remote.h", - "line": 727, - "lineto": 731, + "file": "git2/remote.h", + "line": 1106, + "lineto": 1110, "args": [ { "name": "remote", @@ -17027,7 +17521,7 @@ { "name": "opts", "type": "const git_fetch_options *", - "comment": "options to use for this fetch" + "comment": "options to use for this fetch or NULL" }, { "name": "reflog_message", @@ -17037,24 +17531,17 @@ ], "argline": "git_remote *remote, const git_strarray *refspecs, const git_fetch_options *opts, const char *reflog_message", "sig": "git_remote *::const git_strarray *::const git_fetch_options *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Download new data and update tips

\n", - "comments": "

Convenience function to connect to a remote, download the data, disconnect and update the remote-tracking branches.

\n", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Download new data and update tips.

\n", + "comments": "

Convenience function to connect to a remote, download the data, disconnect and update the remote-tracking branches.

\n\n

If options are specified and this remote is already connected then the existing remote connection options will be discarded and the remote will now use the new options.

\n", "group": "remote", - "examples": { - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_remote_fetch-8" - ] - } + "examples": { "fetch.c": ["ex/v1.9.1/fetch.html#git_remote_fetch-9"] } }, "git_remote_prune": { "type": "function", - "file": "remote.h", - "line": 740, - "lineto": 740, + "file": "git2/remote.h", + "line": 1122, + "lineto": 1124, "args": [ { "name": "remote", @@ -17069,19 +17556,16 @@ ], "argline": "git_remote *remote, const git_remote_callbacks *callbacks", "sig": "git_remote *::const git_remote_callbacks *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Prune tracking refs that are no longer present on remote

\n", - "comments": "", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Prune tracking refs that are no longer present on remote.

\n", + "comments": "

If callbacks are not specified then the callbacks specified to git_remote_connect will be used (if it was called).

\n", "group": "remote" }, "git_remote_push": { "type": "function", - "file": "remote.h", - "line": 752, - "lineto": 754, + "file": "git2/remote.h", + "line": 1139, + "lineto": 1142, "args": [ { "name": "remote", @@ -17101,46 +17585,40 @@ ], "argline": "git_remote *remote, const git_strarray *refspecs, const git_push_options *opts", "sig": "git_remote *::const git_strarray *::const git_push_options *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Perform a push

\n", - "comments": "

Peform all the steps from a push.

\n", - "group": "remote" + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Perform a push.

\n", + "comments": "

If options are specified and this remote is already connected then the existing remote connection options will be discarded and the remote will now use the new options.

\n", + "group": "remote", + "examples": { "push.c": ["ex/v1.9.1/push.html#git_remote_push-4"] } }, "git_remote_stats": { "type": "function", - "file": "remote.h", - "line": 759, - "lineto": 759, + "file": "git2/remote.h", + "line": 1150, + "lineto": 1150, "args": [ { "name": "remote", "type": "git_remote *", - "comment": null + "comment": "the remote to get statistics for" } ], "argline": "git_remote *remote", "sig": "git_remote *", "return": { - "type": "const git_transfer_progress *", - "comment": null + "type": "const git_indexer_progress *", + "comment": " the git_indexer_progress for the remote" }, "description": "

Get the statistics structure that is filled in by the fetch operation.

\n", "comments": "", "group": "remote", - "examples": { - "network/fetch.c": [ - "ex/HEAD/network/fetch.html#git_remote_stats-9" - ] - } + "examples": { "fetch.c": ["ex/v1.9.1/fetch.html#git_remote_stats-10"] } }, "git_remote_autotag": { "type": "function", - "file": "remote.h", - "line": 767, - "lineto": 767, + "file": "git2/remote.h", + "line": 1158, + "lineto": 1158, "args": [ { "name": "remote", @@ -17160,9 +17638,9 @@ }, "git_remote_set_autotag": { "type": "function", - "file": "remote.h", - "line": 779, - "lineto": 779, + "file": "git2/remote.h", + "line": 1171, + "lineto": 1171, "args": [ { "name": "repo", @@ -17182,19 +17660,16 @@ ], "argline": "git_repository *repo, const char *remote, git_remote_autotag_option_t value", "sig": "git_repository *::const char *::git_remote_autotag_option_t", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0, or an error code." }, "description": "

Set the remote's tag following setting.

\n", "comments": "

The change will be made in the configuration. No loaded remotes will be affected.

\n", "group": "remote" }, "git_remote_prune_refs": { "type": "function", - "file": "remote.h", - "line": 786, - "lineto": 786, + "file": "git2/remote.h", + "line": 1179, + "lineto": 1179, "args": [ { "name": "remote", @@ -17204,19 +17679,16 @@ ], "argline": "const git_remote *remote", "sig": "const git_remote *", - "return": { - "type": "int", - "comment": " the ref-prune setting" - }, + "return": { "type": "int", "comment": " the ref-prune setting" }, "description": "

Retrieve the ref-prune setting

\n", "comments": "", "group": "remote" }, "git_remote_rename": { "type": "function", - "file": "remote.h", - "line": 808, - "lineto": 812, + "file": "git2/remote.h", + "line": 1201, + "lineto": 1205, "args": [ { "name": "problems", @@ -17248,39 +17720,37 @@ "description": "

Give the remote a new name

\n", "comments": "

All remote-tracking branches and configuration settings for the remote are updated.

\n\n

The new name will be checked for validity. See git_tag_create() for rules about valid names.

\n\n

No loaded instances of a the remote with the old name will change their name or their list of refspecs.

\n", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_rename-12" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_rename-9"] } }, - "git_remote_is_valid_name": { + "git_remote_name_is_valid": { "type": "function", - "file": "remote.h", - "line": 820, - "lineto": 820, + "file": "git2/remote.h", + "line": 1214, + "lineto": 1214, "args": [ + { + "name": "valid", + "type": "int *", + "comment": "output pointer to set with validity of given remote name" + }, { "name": "remote_name", "type": "const char *", "comment": "name to be checked." } ], - "argline": "const char *remote_name", - "sig": "const char *", - "return": { - "type": "int", - "comment": " 1 if the reference name is acceptable; 0 if it isn't" - }, + "argline": "int *valid, const char *remote_name", + "sig": "int *::const char *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Ensure the remote name is well-formed.

\n", "comments": "", "group": "remote" }, "git_remote_delete": { "type": "function", - "file": "remote.h", - "line": 832, - "lineto": 832, + "file": "git2/remote.h", + "line": 1226, + "lineto": 1226, "args": [ { "name": "repo", @@ -17302,28 +17772,20 @@ "description": "

Delete an existing persisted remote.

\n", "comments": "

All remote-tracking branches and configuration settings for the remote will be removed.

\n", "group": "remote", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_remote_delete-13" - ] - } + "examples": { "remote.c": ["ex/v1.9.1/remote.html#git_remote_delete-10"] } }, "git_remote_default_branch": { "type": "function", - "file": "remote.h", - "line": 850, - "lineto": 850, + "file": "git2/remote.h", + "line": 1244, + "lineto": 1244, "args": [ { "name": "out", "type": "git_buf *", - "comment": "the buffern in which to store the reference name" + "comment": "the buffer in which to store the reference name" }, - { - "name": "remote", - "type": "git_remote *", - "comment": "the remote" - } + { "name": "remote", "type": "git_remote *", "comment": "the remote" } ], "argline": "git_buf *out, git_remote *remote", "sig": "git_buf *::git_remote *", @@ -17337,9 +17799,9 @@ }, "git_repository_open": { "type": "function", - "file": "repository.h", - "line": 37, - "lineto": 37, + "file": "git2/repository.h", + "line": 43, + "lineto": 43, "args": [ { "name": "out", @@ -17354,27 +17816,19 @@ ], "argline": "git_repository **out, const char *path", "sig": "git_repository **::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Open a git repository.

\n", - "comments": "

The 'path' argument must point to either a git repository folder, or an existing work dir.

\n\n

The method will automatically detect if 'path' is a normal or bare repository or fail is 'path' is neither.

\n", + "comments": "

The 'path' argument must point to either a git repository folder, or an existing work dir.

\n\n

The method will automatically detect if 'path' is a normal or bare repository or fail if 'path' is neither.

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", "group": "repository", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_repository_open-68" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_repository_open-14" - ] + "general.c": ["ex/v1.9.1/general.html#git_repository_open-75"] } }, "git_repository_open_from_worktree": { "type": "function", - "file": "repository.h", - "line": 48, - "lineto": 48, + "file": "git2/repository.h", + "line": 54, + "lineto": 54, "args": [ { "name": "out", @@ -17389,19 +17843,16 @@ ], "argline": "git_repository **out, git_worktree *wt", "sig": "git_repository **::git_worktree *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Open working tree as a repository

\n", "comments": "

Open the working directory of the working tree as a normal repository that can then be worked on.

\n", "group": "repository" }, "git_repository_wrap_odb": { "type": "function", - "file": "repository.h", - "line": 61, - "lineto": 61, + "file": "git2/repository.h", + "line": 67, + "lineto": 69, "args": [ { "name": "out", @@ -17416,19 +17867,16 @@ ], "argline": "git_repository **out, git_odb *odb", "sig": "git_repository **::git_odb *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a "fake" repository to wrap an object database

\n", "comments": "

Create a repository object to wrap an object database to be used with the API when all you have is an object database. This doesn't have any paths associated with it, so use with care.

\n", "group": "repository" }, "git_repository_discover": { "type": "function", - "file": "repository.h", - "line": 89, - "lineto": 93, + "file": "git2/repository.h", + "line": 101, + "lineto": 105, "args": [ { "name": "out", @@ -17453,24 +17901,16 @@ ], "argline": "git_buf *out, const char *start_path, int across_fs, const char *ceiling_dirs", "sig": "git_buf *::const char *::int::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Look for a git repository and copy its path in the given buffer.\n The lookup starts from base_path and walks across parent directories\n if nothing has been found. The lookup ends when the first repository\n is found, or when reaching a directory referenced in ceiling_dirs\n or when the filesystem changes (in case across_fs is true).

\n", - "comments": "

The method will automatically detect if the repository is bare (if there is a repository).

\n", - "group": "repository", - "examples": { - "remote.c": [ - "ex/HEAD/remote.html#git_repository_discover-15" - ] - } + "comments": "

The method will automatically detect if the repository is bare (if there is a repository).

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", + "group": "repository" }, "git_repository_open_ext": { "type": "function", - "file": "repository.h", - "line": 152, - "lineto": 156, + "file": "git2/repository.h", + "line": 181, + "lineto": 185, "args": [ { "name": "out", @@ -17500,44 +17940,15 @@ "comment": " 0 on success, GIT_ENOTFOUND if no repository could be found,\n or -1 if there was a repository but open failed for some reason\n (such as repo corruption or system errors)." }, "description": "

Find and open a repository with extended controls.

\n", - "comments": "", + "comments": "

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", "group": "repository", - "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_repository_open_ext-24" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_repository_open_ext-31" - ], - "describe.c": [ - "ex/HEAD/describe.html#git_repository_open_ext-6" - ], - "diff.c": [ - "ex/HEAD/diff.html#git_repository_open_ext-15" - ], - "log.c": [ - "ex/HEAD/log.html#git_repository_open_ext-45", - "ex/HEAD/log.html#git_repository_open_ext-46" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_repository_open_ext-38" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_repository_open_ext-16" - ], - "status.c": [ - "ex/HEAD/status.html#git_repository_open_ext-5" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_repository_open_ext-11" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_repository_open_ext-43"] } }, "git_repository_open_bare": { "type": "function", - "file": "repository.h", - "line": 169, - "lineto": 169, + "file": "git2/repository.h", + "line": 202, + "lineto": 202, "args": [ { "name": "out", @@ -17552,19 +17963,16 @@ ], "argline": "git_repository **out, const char *bare_path", "sig": "git_repository **::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Open a bare repository on the serverside.

\n", - "comments": "

This is a fast open for bare repositories that will come in handy if you're e.g. hosting git repositories and need to access them efficiently

\n", + "comments": "

This is a fast open for bare repositories that will come in handy if you're e.g. hosting git repositories and need to access them efficiently

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", "group": "repository" }, "git_repository_free": { "type": "function", - "file": "repository.h", - "line": 182, - "lineto": 182, + "file": "git2/repository.h", + "line": 215, + "lineto": 215, "args": [ { "name": "repo", @@ -17574,57 +17982,20 @@ ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a previously allocated repository

\n", "comments": "

Note that after a repository is free'd, all the objects it has spawned will still exist until they are manually closed by the user with git_object_free, but accessing any of the attributes of an object without a backing repository will result in undefined behavior

\n", "group": "repository", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_repository_free-25" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_repository_free-32" - ], - "describe.c": [ - "ex/HEAD/describe.html#git_repository_free-7" - ], - "diff.c": [ - "ex/HEAD/diff.html#git_repository_free-16" - ], - "general.c": [ - "ex/HEAD/general.html#git_repository_free-69" - ], - "init.c": [ - "ex/HEAD/init.html#git_repository_free-6" - ], - "log.c": [ - "ex/HEAD/log.html#git_repository_free-47" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_repository_free-39" - ], - "network/clone.c": [ - "ex/HEAD/network/clone.html#git_repository_free-3" - ], - "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_repository_free-17" - ], - "status.c": [ - "ex/HEAD/status.html#git_repository_free-6" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_repository_free-12" - ] + "general.c": ["ex/v1.9.1/general.html#git_repository_free-76"], + "init.c": ["ex/v1.9.1/init.html#git_repository_free-4"] } }, "git_repository_init": { "type": "function", - "file": "repository.h", - "line": 199, - "lineto": 202, + "file": "git2/repository.h", + "line": 236, + "lineto": 239, "args": [ { "name": "out", @@ -17644,34 +18015,27 @@ ], "argline": "git_repository **out, const char *path, unsigned int is_bare", "sig": "git_repository **::const char *::unsigned int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Creates a new Git repository in the given folder.

\n", - "comments": "

TODO: - Reinit the repository

\n", + "comments": "

TODO: - Reinit the repository

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", "group": "repository", - "examples": { - "init.c": [ - "ex/HEAD/init.html#git_repository_init-7" - ] - } + "examples": { "init.c": ["ex/v1.9.1/init.html#git_repository_init-5"] } }, - "git_repository_init_init_options": { + "git_repository_init_options_init": { "type": "function", - "file": "repository.h", - "line": 311, - "lineto": 313, + "file": "git2/repository.h", + "line": 405, + "lineto": 407, "args": [ { "name": "opts", "type": "git_repository_init_options *", - "comment": "the `git_repository_init_options` struct to initialize" + "comment": "The `git_repository_init_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_REPOSITORY_INIT_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_REPOSITORY_INIT_OPTIONS_VERSION`." } ], "argline": "git_repository_init_options *opts, unsigned int version", @@ -17680,15 +18044,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_repository_init_options with default values. Equivalent\n to creating an instance with GIT_REPOSITORY_INIT_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_repository_init_options structure

\n", + "comments": "

Initializes a git_repository_init_options with default values. Equivalent to creating an instance with GIT_REPOSITORY_INIT_OPTIONS_INIT.

\n", "group": "repository" }, "git_repository_init_ext": { "type": "function", - "file": "repository.h", - "line": 328, - "lineto": 331, + "file": "git2/repository.h", + "line": 426, + "lineto": 429, "args": [ { "name": "out", @@ -17708,24 +18072,19 @@ ], "argline": "git_repository **out, const char *repo_path, git_repository_init_options *opts", "sig": "git_repository **::const char *::git_repository_init_options *", - "return": { - "type": "int", - "comment": " 0 or an error code on failure." - }, + "return": { "type": "int", "comment": " 0 or an error code on failure." }, "description": "

Create a new Git repository in the given folder with extended controls.

\n", - "comments": "

This will initialize a new git repository (creating the repo_path if requested by flags) and working directory as needed. It will auto-detect the case sensitivity of the file system and if the file system supports file mode bits correctly.

\n", + "comments": "

This will initialize a new git repository (creating the repo_path if requested by flags) and working directory as needed. It will auto-detect the case sensitivity of the file system and if the file system supports file mode bits correctly.

\n\n

Note that the libgit2 library must be initialized using git_libgit2_init before any APIs can be called, including this one.

\n", "group": "repository", "examples": { - "init.c": [ - "ex/HEAD/init.html#git_repository_init_ext-8" - ] + "init.c": ["ex/v1.9.1/init.html#git_repository_init_ext-6"] } }, "git_repository_head": { "type": "function", - "file": "repository.h", - "line": 346, - "lineto": 346, + "file": "git2/repository.h", + "line": 444, + "lineto": 444, "args": [ { "name": "out", @@ -17749,19 +18108,17 @@ "group": "repository", "examples": { "merge.c": [ - "ex/HEAD/merge.html#git_repository_head-40", - "ex/HEAD/merge.html#git_repository_head-41" + "ex/v1.9.1/merge.html#git_repository_head-31", + "ex/v1.9.1/merge.html#git_repository_head-32" ], - "status.c": [ - "ex/HEAD/status.html#git_repository_head-7" - ] + "status.c": ["ex/v1.9.1/status.html#git_repository_head-3"] } }, "git_repository_head_for_worktree": { "type": "function", - "file": "repository.h", - "line": 356, - "lineto": 357, + "file": "git2/repository.h", + "line": 454, + "lineto": 455, "args": [ { "name": "out", @@ -17791,9 +18148,9 @@ }, "git_repository_head_detached": { "type": "function", - "file": "repository.h", - "line": 369, - "lineto": 369, + "file": "git2/repository.h", + "line": 467, + "lineto": 467, "args": [ { "name": "repo", @@ -17811,11 +18168,38 @@ "comments": "

A repository's HEAD is detached when it points directly to a commit instead of a branch.

\n", "group": "repository" }, + "git_repository_head_detached_for_worktree": { + "type": "function", + "file": "git2/repository.h", + "line": 480, + "lineto": 481, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "a repository object" + }, + { + "name": "name", + "type": "const char *", + "comment": "name of the worktree to retrieve HEAD for" + } + ], + "argline": "git_repository *repo, const char *name", + "sig": "git_repository *::const char *", + "return": { + "type": "int", + "comment": " 1 if HEAD is detached, 0 if its not; error code if\n there was an error" + }, + "description": "

Check if a worktree's HEAD is detached

\n", + "comments": "

A worktree's HEAD is detached when it points directly to a commit instead of a branch.

\n", + "group": "repository" + }, "git_repository_head_unborn": { "type": "function", - "file": "repository.h", - "line": 395, - "lineto": 395, + "file": "git2/repository.h", + "line": 493, + "lineto": 493, "args": [ { "name": "repo", @@ -17835,9 +18219,9 @@ }, "git_repository_is_empty": { "type": "function", - "file": "repository.h", - "line": 407, - "lineto": 407, + "file": "git2/repository.h", + "line": 507, + "lineto": 507, "args": [ { "name": "repo", @@ -17852,14 +18236,14 @@ "comment": " 1 if the repository is empty, 0 if it isn't, error code\n if the repository is corrupted" }, "description": "

Check if a repository is empty

\n", - "comments": "

An empty repository has just been initialized and contains no references apart from HEAD, which must be pointing to the unborn master branch.

\n", + "comments": "

An empty repository has just been initialized and contains no references apart from HEAD, which must be pointing to the unborn master branch, or the branch specified for the repository in the init.defaultBranch configuration variable.

\n", "group": "repository" }, "git_repository_item_path": { "type": "function", - "file": "repository.h", - "line": 443, - "lineto": 443, + "file": "git2/repository.h", + "line": 545, + "lineto": 545, "args": [ { "name": "out", @@ -17889,9 +18273,9 @@ }, "git_repository_path": { "type": "function", - "file": "repository.h", - "line": 454, - "lineto": 454, + "file": "git2/repository.h", + "line": 556, + "lineto": 556, "args": [ { "name": "repo", @@ -17909,19 +18293,15 @@ "comments": "

This is the path of the .git folder for normal repositories, or of the repository itself for bare repositories.

\n", "group": "repository", "examples": { - "init.c": [ - "ex/HEAD/init.html#git_repository_path-9" - ], - "status.c": [ - "ex/HEAD/status.html#git_repository_path-8" - ] + "init.c": ["ex/v1.9.1/init.html#git_repository_path-7"], + "status.c": ["ex/v1.9.1/status.html#git_repository_path-4"] } }, "git_repository_workdir": { "type": "function", - "file": "repository.h", - "line": 465, - "lineto": 465, + "file": "git2/repository.h", + "line": 567, + "lineto": 567, "args": [ { "name": "repo", @@ -17938,17 +18318,13 @@ "description": "

Get the path of the working directory for this repository

\n", "comments": "

If the repository is bare, this function will always return NULL.

\n", "group": "repository", - "examples": { - "init.c": [ - "ex/HEAD/init.html#git_repository_workdir-10" - ] - } + "examples": { "init.c": ["ex/v1.9.1/init.html#git_repository_workdir-8"] } }, "git_repository_commondir": { "type": "function", - "file": "repository.h", - "line": 476, - "lineto": 476, + "file": "git2/repository.h", + "line": 579, + "lineto": 579, "args": [ { "name": "repo", @@ -17962,15 +18338,15 @@ "type": "const char *", "comment": " the path to the common dir" }, - "description": "

Get the path of the shared common directory for this repository

\n", - "comments": "

If the repository is bare is not a worktree, the git directory path is returned.

\n", + "description": "

Get the path of the shared common directory for this repository.

\n", + "comments": "

If the repository is bare, it is the root directory for the repository. If the repository is a worktree, it is the parent repo's gitdir. Otherwise, it is the gitdir.

\n", "group": "repository" }, "git_repository_set_workdir": { "type": "function", - "file": "repository.h", - "line": 495, - "lineto": 496, + "file": "git2/repository.h", + "line": 598, + "lineto": 599, "args": [ { "name": "repo", @@ -17990,19 +18366,16 @@ ], "argline": "git_repository *repo, const char *workdir, int update_gitlink", "sig": "git_repository *::const char *::int", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Set the path to the working directory for this repository

\n", "comments": "

The working directory doesn't need to be the same one that contains the .git folder for this repository.

\n\n

If this repository is bare, setting its working directory will turn it into a normal repository, capable of performing all the common workdir operations (checkout, status, index manipulation, etc).

\n", "group": "repository" }, "git_repository_is_bare": { "type": "function", - "file": "repository.h", - "line": 504, - "lineto": 504, + "file": "git2/repository.h", + "line": 607, + "lineto": 607, "args": [ { "name": "repo", @@ -18020,16 +18393,14 @@ "comments": "", "group": "repository", "examples": { - "status.c": [ - "ex/HEAD/status.html#git_repository_is_bare-9" - ] + "status.c": ["ex/v1.9.1/status.html#git_repository_is_bare-5"] } }, "git_repository_is_worktree": { "type": "function", - "file": "repository.h", - "line": 512, - "lineto": 512, + "file": "git2/repository.h", + "line": 615, + "lineto": 615, "args": [ { "name": "repo", @@ -18049,9 +18420,9 @@ }, "git_repository_config": { "type": "function", - "file": "repository.h", - "line": 528, - "lineto": 528, + "file": "git2/repository.h", + "line": 631, + "lineto": 631, "args": [ { "name": "out", @@ -18066,19 +18437,19 @@ ], "argline": "git_config **out, git_repository *repo", "sig": "git_config **::git_repository *", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Get the configuration file for this repository.

\n", "comments": "

If a configuration file has not been set, the default config set for the repository will be returned, including global and system configurations (if they are available).

\n\n

The configuration file must be freed once it's no longer being used by the user.

\n", - "group": "repository" + "group": "repository", + "examples": { + "config.c": ["ex/v1.9.1/config.html#git_repository_config-9"] + } }, "git_repository_config_snapshot": { "type": "function", - "file": "repository.h", - "line": 544, - "lineto": 544, + "file": "git2/repository.h", + "line": 647, + "lineto": 647, "args": [ { "name": "out", @@ -18093,25 +18464,22 @@ ], "argline": "git_config **out, git_repository *repo", "sig": "git_config **::git_repository *", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Get a snapshot of the repository's configuration

\n", "comments": "

Convenience function to take a snapshot from the repository's configuration. The contents of this snapshot will not change, even if the underlying config files are modified.

\n\n

The configuration file must be freed once it's no longer being used by the user.

\n", "group": "repository", "examples": { "general.c": [ - "ex/HEAD/general.html#git_repository_config_snapshot-70", - "ex/HEAD/general.html#git_repository_config_snapshot-71" + "ex/v1.9.1/general.html#git_repository_config_snapshot-77", + "ex/v1.9.1/general.html#git_repository_config_snapshot-78" ] } }, "git_repository_odb": { "type": "function", - "file": "repository.h", - "line": 560, - "lineto": 560, + "file": "git2/repository.h", + "line": 663, + "lineto": 663, "args": [ { "name": "out", @@ -18126,27 +18494,20 @@ ], "argline": "git_odb **out, git_repository *repo", "sig": "git_odb **::git_repository *", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Get the Object Database for this repository.

\n", "comments": "

If a custom ODB has not been set, the default database for the repository will be returned (the one located in .git/objects).

\n\n

The ODB must be freed once it's no longer being used by the user.

\n", "group": "repository", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_repository_odb-33" - ], - "general.c": [ - "ex/HEAD/general.html#git_repository_odb-72" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_repository_odb-29"], + "general.c": ["ex/v1.9.1/general.html#git_repository_odb-79"] } }, "git_repository_refdb": { "type": "function", - "file": "repository.h", - "line": 576, - "lineto": 576, + "file": "git2/repository.h", + "line": 679, + "lineto": 679, "args": [ { "name": "out", @@ -18161,19 +18522,16 @@ ], "argline": "git_refdb **out, git_repository *repo", "sig": "git_refdb **::git_repository *", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Get the Reference Database Backend for this repository.

\n", "comments": "

If a custom refsdb has not been set, the default database for the repository will be returned (the one that manipulates loose and packed references in the .git directory).

\n\n

The refdb must be freed once it's no longer being used by the user.

\n", "group": "repository" }, "git_repository_index": { "type": "function", - "file": "repository.h", - "line": 592, - "lineto": 592, + "file": "git2/repository.h", + "line": 695, + "lineto": 695, "args": [ { "name": "out", @@ -18188,30 +18546,24 @@ ], "argline": "git_index **out, git_repository *repo", "sig": "git_index **::git_repository *", - "return": { - "type": "int", - "comment": " 0, or an error code" - }, + "return": { "type": "int", "comment": " 0, or an error code" }, "description": "

Get the Index file for this repository.

\n", "comments": "

If a custom index has not been set, the default index for the repository will be returned (the one located in .git/index).

\n\n

The index must be freed once it's no longer being used by the user.

\n", "group": "repository", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_repository_index-73" - ], - "init.c": [ - "ex/HEAD/init.html#git_repository_index-11" - ], - "merge.c": [ - "ex/HEAD/merge.html#git_repository_index-42" - ] + "add.c": ["ex/v1.9.1/add.html#git_repository_index-5"], + "commit.c": ["ex/v1.9.1/commit.html#git_repository_index-8"], + "general.c": ["ex/v1.9.1/general.html#git_repository_index-80"], + "init.c": ["ex/v1.9.1/init.html#git_repository_index-9"], + "ls-files.c": ["ex/v1.9.1/ls-files.html#git_repository_index-5"], + "merge.c": ["ex/v1.9.1/merge.html#git_repository_index-33"] } }, "git_repository_message": { "type": "function", - "file": "repository.h", - "line": 610, - "lineto": 610, + "file": "git2/repository.h", + "line": 713, + "lineto": 713, "args": [ { "name": "out", @@ -18236,31 +18588,28 @@ }, "git_repository_message_remove": { "type": "function", - "file": "repository.h", - "line": 617, - "lineto": 617, + "file": "git2/repository.h", + "line": 723, + "lineto": 723, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "Repository to remove prepared message from." } ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Remove git's prepared message.

\n", "comments": "

Remove the message that git_repository_message retrieves.

\n", "group": "repository" }, "git_repository_state_cleanup": { "type": "function", - "file": "repository.h", - "line": 626, - "lineto": 626, + "file": "git2/repository.h", + "line": 732, + "lineto": 732, "args": [ { "name": "repo", @@ -18270,24 +18619,19 @@ ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "int", - "comment": " 0 on success, or error" - }, + "return": { "type": "int", "comment": " 0 on success, or error" }, "description": "

Remove all the metadata associated with an ongoing command like merge,\n revert, cherry-pick, etc. For example: MERGE_HEAD, MERGE_MSG, etc.

\n", "comments": "", "group": "repository", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_repository_state_cleanup-43" - ] + "merge.c": ["ex/v1.9.1/merge.html#git_repository_state_cleanup-34"] } }, "git_repository_fetchhead_foreach": { "type": "function", - "file": "repository.h", - "line": 645, - "lineto": 648, + "file": "git2/repository.h", + "line": 763, + "lineto": 766, "args": [ { "name": "repo", @@ -18317,9 +18661,9 @@ }, "git_repository_mergehead_foreach": { "type": "function", - "file": "repository.h", - "line": 665, - "lineto": 668, + "file": "git2/repository.h", + "line": 792, + "lineto": 795, "args": [ { "name": "repo", @@ -18349,9 +18693,9 @@ }, "git_repository_hashfile": { "type": "function", - "file": "repository.h", - "line": 693, - "lineto": 698, + "file": "git2/repository.h", + "line": 822, + "lineto": 827, "args": [ { "name": "out", @@ -18366,34 +18710,31 @@ { "name": "path", "type": "const char *", - "comment": "Path to file on disk whose contents should be hashed. If the\n repository is not NULL, this can be a relative path." + "comment": "Path to file on disk whose contents should be hashed. This\n may be an absolute path or a relative path, in which case it\n will be treated as a path within the working directory." }, { "name": "type", - "type": "git_otype", - "comment": "The object type to hash as (e.g. GIT_OBJ_BLOB)" + "type": "git_object_t", + "comment": "The object type to hash as (e.g. GIT_OBJECT_BLOB)" }, { "name": "as_path", "type": "const char *", - "comment": "The path to use to look up filtering rules. If this is\n NULL, then the `path` parameter will be used instead. If\n this is passed as the empty string, then no filters will be\n applied when calculating the hash." + "comment": "The path to use to look up filtering rules. If this is\n an empty string then no filters will be applied when\n calculating the hash. 
If this is `NULL` and the `path`\n parameter is a file within the repository's working\n directory, then the `path` will be used." } ], - "argline": "git_oid *out, git_repository *repo, const char *path, git_otype type, const char *as_path", - "sig": "git_oid *::git_repository *::const char *::git_otype::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "argline": "git_oid *out, git_repository *repo, const char *path, git_object_t type, const char *as_path", + "sig": "git_oid *::git_repository *::const char *::git_object_t::const char *", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Calculate hash of file using repository filtering rules.

\n", "comments": "

If you simply want to calculate the hash of a file on disk with no filters, you can just use the git_odb_hashfile() API. However, if you want to hash a file in the repository and you want to apply filtering rules (e.g. crlf filters) before generating the SHA, then use this function.

\n\n

Note: if the repository has core.safecrlf set to fail and the filtering triggers that failure, then this function will return an error and not calculate the hash of the file.

\n", "group": "repository" }, "git_repository_set_head": { "type": "function", - "file": "repository.h", - "line": 718, - "lineto": 720, + "file": "git2/repository.h", + "line": 847, + "lineto": 849, "args": [ { "name": "repo", @@ -18408,19 +18749,19 @@ ], "argline": "git_repository *repo, const char *refname", "sig": "git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Make the repository HEAD point to the specified reference.

\n", "comments": "

If the provided reference points to a Tree or a Blob, the HEAD is unaltered and -1 is returned.

\n\n

If the provided reference points to a branch, the HEAD will point to that branch, staying attached, or become attached if it isn't yet. If the branch doesn't exist yet, no error will be returned. The HEAD will then be attached to an unborn branch.

\n\n

Otherwise, the HEAD will be detached and will directly point to the Commit.

\n", - "group": "repository" + "group": "repository", + "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_repository_set_head-23"] + } }, "git_repository_set_head_detached": { "type": "function", - "file": "repository.h", - "line": 738, - "lineto": 740, + "file": "git2/repository.h", + "line": 867, + "lineto": 869, "args": [ { "name": "repo", @@ -18428,53 +18769,52 @@ "comment": "Repository pointer" }, { - "name": "commitish", + "name": "committish", "type": "const git_oid *", "comment": "Object id of the Commit the HEAD should point to" } ], - "argline": "git_repository *repo, const git_oid *commitish", + "argline": "git_repository *repo, const git_oid *committish", "sig": "git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Make the repository HEAD directly point to the Commit.

\n", - "comments": "

If the provided committish cannot be found in the repository, the HEAD is unaltered and GIT_ENOTFOUND is returned.

\n\n

If the provided commitish cannot be peeled into a commit, the HEAD is unaltered and -1 is returned.

\n\n

Otherwise, the HEAD will eventually be detached and will directly point to the peeled Commit.

\n", + "comments": "

If the provided committish cannot be found in the repository, the HEAD is unaltered and GIT_ENOTFOUND is returned.

\n\n

If the provided committish cannot be peeled into a commit, the HEAD is unaltered and -1 is returned.

\n\n

Otherwise, the HEAD will eventually be detached and will directly point to the peeled Commit.

\n", "group": "repository" }, "git_repository_set_head_detached_from_annotated": { "type": "function", - "file": "repository.h", - "line": 754, - "lineto": 756, + "file": "git2/repository.h", + "line": 885, + "lineto": 887, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "Repository pointer" }, { - "name": "commitish", + "name": "committish", "type": "const git_annotated_commit *", - "comment": null + "comment": "annotated commit to point HEAD to" } ], - "argline": "git_repository *repo, const git_annotated_commit *commitish", + "argline": "git_repository *repo, const git_annotated_commit *committish", "sig": "git_repository *::const git_annotated_commit *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Make the repository HEAD directly point to the Commit.

\n", "comments": "

This behaves like git_repository_set_head_detached() but takes an annotated commit, which lets you specify which extended sha syntax string was specified by a user, allowing for more exact reflog messages.

\n\n

See the documentation for git_repository_set_head_detached().

\n", - "group": "repository" + "group": "repository", + "examples": { + "checkout.c": [ + "ex/v1.9.1/checkout.html#git_repository_set_head_detached_from_annotated-24" + ] + } }, "git_repository_detach_head": { "type": "function", - "file": "repository.h", - "line": 775, - "lineto": 776, + "file": "git2/repository.h", + "line": 906, + "lineto": 907, "args": [ { "name": "repo", @@ -18489,14 +18829,14 @@ "comment": " 0 on success, GIT_EUNBORNBRANCH when HEAD points to a non existing\n branch or an error code" }, "description": "

Detach the HEAD.

\n", - "comments": "

If the HEAD is already detached and points to a Commit, 0 is returned.

\n\n

If the HEAD is already detached and points to a Tag, the HEAD is updated into making it point to the peeled Commit, and 0 is returned.

\n\n

If the HEAD is already detached and points to a non commitish, the HEAD is unaltered, and -1 is returned.

\n\n

Otherwise, the HEAD will be detached and point to the peeled Commit.

\n", + "comments": "

If the HEAD is already detached and points to a Commit, 0 is returned.

\n\n

If the HEAD is already detached and points to a Tag, the HEAD is updated into making it point to the peeled Commit, and 0 is returned.

\n\n

If the HEAD is already detached and points to a non committish, the HEAD is unaltered, and -1 is returned.

\n\n

Otherwise, the HEAD will be detached and point to the peeled Commit.

\n", "group": "repository" }, "git_repository_state": { "type": "function", - "file": "repository.h", - "line": 806, - "lineto": 806, + "file": "git2/repository.h", + "line": 937, + "lineto": 937, "args": [ { "name": "repo", @@ -18506,30 +18846,22 @@ ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "int", - "comment": " The state of the repository" - }, + "return": { "type": "int", "comment": " The state of the repository" }, "description": "

Determines the status of a git repository - ie, whether an operation\n (merge, cherry-pick, etc) is in progress.

\n", "comments": "", "group": "repository", "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_repository_state-44" - ] + "checkout.c": ["ex/v1.9.1/checkout.html#git_repository_state-25"], + "merge.c": ["ex/v1.9.1/merge.html#git_repository_state-35"] } }, "git_repository_set_namespace": { "type": "function", - "file": "repository.h", - "line": 820, - "lineto": 820, + "file": "git2/repository.h", + "line": 951, + "lineto": 951, "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The repo" - }, + { "name": "repo", "type": "git_repository *", "comment": "The repo" }, { "name": "nmspace", "type": "const char *", @@ -18538,25 +18870,18 @@ ], "argline": "git_repository *repo, const char *nmspace", "sig": "git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, -1 on error" - }, + "return": { "type": "int", "comment": " 0 on success, -1 on error" }, "description": "

Sets the active namespace for this Git Repository

\n", "comments": "

This namespace affects all reference operations for the repo. See man gitnamespaces

\n", "group": "repository" }, "git_repository_get_namespace": { "type": "function", - "file": "repository.h", - "line": 828, - "lineto": 828, + "file": "git2/repository.h", + "line": 959, + "lineto": 959, "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The repo" - } + { "name": "repo", "type": "git_repository *", "comment": "The repo" } ], "argline": "git_repository *repo", "sig": "git_repository *", @@ -18570,9 +18895,9 @@ }, "git_repository_is_shallow": { "type": "function", - "file": "repository.h", - "line": 837, - "lineto": 837, + "file": "git2/repository.h", + "line": 968, + "lineto": 968, "args": [ { "name": "repo", @@ -18582,19 +18907,16 @@ ], "argline": "git_repository *repo", "sig": "git_repository *", - "return": { - "type": "int", - "comment": " 1 if shallow, zero if not" - }, + "return": { "type": "int", "comment": " 1 if shallow, zero if not" }, "description": "

Determine if the repository was a shallow clone

\n", "comments": "", "group": "repository" }, "git_repository_ident": { "type": "function", - "file": "repository.h", - "line": 849, - "lineto": 849, + "file": "git2/repository.h", + "line": 981, + "lineto": 981, "args": [ { "name": "name", @@ -18614,19 +18936,16 @@ ], "argline": "const char **name, const char **email, const git_repository *repo", "sig": "const char **::const char **::const git_repository *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Retrieve the configured identity to use for reflogs

\n", "comments": "

The memory is owned by the repository and must not be freed by the user.

\n", "group": "repository" }, "git_repository_set_ident": { "type": "function", - "file": "repository.h", - "line": 862, - "lineto": 862, + "file": "git2/repository.h", + "line": 995, + "lineto": 995, "args": [ { "name": "repo", @@ -18646,17 +18965,57 @@ ], "argline": "git_repository *repo, const char *name, const char *email", "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code." }, "description": "

Set the identity to be used for writing reflogs

\n", "comments": "

If both are set, this name and email will be used to write to the reflog. Pass NULL to unset. When unset, the identity will be taken from the repository's configuration.

\n", "group": "repository" }, + "git_repository_oid_type": { + "type": "function", + "file": "git2/repository.h", + "line": 1003, + "lineto": 1003, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository" + } + ], + "argline": "git_repository *repo", + "sig": "git_repository *", + "return": { "type": "git_oid_t", "comment": " the object id type" }, + "description": "

Gets the object type used by this repository.

\n", + "comments": "", + "group": "repository" + }, + "git_repository_commit_parents": { + "type": "function", + "file": "git2/repository.h", + "line": 1014, + "lineto": 1014, + "args": [ + { + "name": "commits", + "type": "git_commitarray *", + "comment": "a `git_commitarray` that will contain the commit parents" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository" + } + ], + "argline": "git_commitarray *commits, git_repository *repo", + "sig": "git_commitarray *::git_repository *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Gets the parents of the next commit, given the current repository state.\n Generally, this is the HEAD commit, except when performing a merge, in\n which case it is two or more commits.

\n", + "comments": "", + "group": "repository" + }, "git_reset": { "type": "function", - "file": "reset.h", + "file": "git2/reset.h", "line": 62, "lineto": 66, "args": [ @@ -18668,7 +19027,7 @@ { "name": "target", "type": "const git_object *", - "comment": "Committish to which the Head should be moved to. This object\n must belong to the given `repo` and can either be a git_commit or a\n git_tag. When a git_tag is being passed, it should be dereferencable\n to a git_commit which oid will be used as the target of the branch." + "comment": "Committish to which the Head should be moved to. This object\n must belong to the given `repo` and can either be a git_commit or a\n git_tag. When a git_tag is being passed, it should be dereferenceable\n to a git_commit which oid will be used as the target of the branch." }, { "name": "reset_type", @@ -18683,56 +19042,50 @@ ], "argline": "git_repository *repo, const git_object *target, git_reset_t reset_type, const git_checkout_options *checkout_opts", "sig": "git_repository *::const git_object *::git_reset_t::const git_checkout_options *", - "return": { - "type": "int", - "comment": " 0 on success or an error code" - }, + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Sets the current head to the specified commit oid and optionally\n resets the index and working tree to match.

\n", "comments": "

SOFT reset means the Head will be moved to the commit.

\n\n

MIXED reset will trigger a SOFT reset, plus the index will be replaced with the content of the commit tree.

\n\n

HARD reset will trigger a MIXED reset and the working directory will be replaced with the content of the index. (Untracked and ignored files will be left alone, however.)

\n\n

TODO: Implement remaining kinds of resets.

\n", "group": "reset" }, "git_reset_from_annotated": { "type": "function", - "file": "reset.h", - "line": 80, - "lineto": 84, + "file": "git2/reset.h", + "line": 92, + "lineto": 96, "args": [ { "name": "repo", "type": "git_repository *", - "comment": null + "comment": "Repository where to perform the reset operation." }, { - "name": "commit", + "name": "target", "type": "const git_annotated_commit *", - "comment": null + "comment": "Annotated commit to which the Head should be moved to.\n This object must belong to the given `repo`, it will be dereferenced\n to a git_commit which oid will be used as the target of the branch." }, { "name": "reset_type", "type": "git_reset_t", - "comment": null + "comment": "Kind of reset operation to perform." }, { "name": "checkout_opts", "type": "const git_checkout_options *", - "comment": null + "comment": "Optional checkout options to be used for a HARD reset.\n The checkout_strategy field will be overridden (based on reset_type).\n This parameter can be used to propagate notify and progress callbacks." } ], - "argline": "git_repository *repo, const git_annotated_commit *commit, git_reset_t reset_type, const git_checkout_options *checkout_opts", + "argline": "git_repository *repo, const git_annotated_commit *target, git_reset_t reset_type, const git_checkout_options *checkout_opts", "sig": "git_repository *::const git_annotated_commit *::git_reset_t::const git_checkout_options *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Sets the current head to the specified commit oid and optionally\n resets the index and working tree to match.

\n", "comments": "

This behaves like git_reset() but takes an annotated commit, which lets you specify which extended sha syntax string was specified by a user, allowing for more exact reflog messages.

\n\n

See the documentation for git_reset().

\n", "group": "reset" }, "git_reset_default": { "type": "function", - "file": "reset.h", - "line": 104, - "lineto": 107, + "file": "git2/reset.h", + "line": 116, + "lineto": 119, "args": [ { "name": "repo", @@ -18760,21 +19113,21 @@ "comments": "

The scope of the updated entries is determined by the paths being passed in the pathspec parameters.

\n\n

Passing a NULL target will result in removing entries in the index matching the provided pathspecs.

\n", "group": "reset" }, - "git_revert_init_options": { + "git_revert_options_init": { "type": "function", - "file": "revert.h", - "line": 47, - "lineto": 49, + "file": "git2/revert.h", + "line": 54, + "lineto": 56, "args": [ { "name": "opts", "type": "git_revert_options *", - "comment": "the `git_revert_options` struct to initialize" + "comment": "The `git_revert_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version of struct; pass `GIT_REVERT_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_REVERT_OPTIONS_VERSION`." } ], "argline": "git_revert_options *opts, unsigned int version", @@ -18783,15 +19136,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_revert_options with default values. Equivalent to\n creating an instance with GIT_REVERT_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_revert_options structure

\n", + "comments": "

Initializes a git_revert_options with default values. Equivalent to creating an instance with GIT_REVERT_OPTIONS_INIT.

\n", "group": "revert" }, "git_revert_commit": { "type": "function", - "file": "revert.h", - "line": 65, - "lineto": 71, + "file": "git2/revert.h", + "line": 72, + "lineto": 78, "args": [ { "name": "out", @@ -18836,9 +19189,9 @@ }, "git_revert": { "type": "function", - "file": "revert.h", - "line": 81, - "lineto": 84, + "file": "git2/revert.h", + "line": 88, + "lineto": 91, "args": [ { "name": "repo", @@ -18868,7 +19221,7 @@ }, "git_revparse_single": { "type": "function", - "file": "revparse.h", + "file": "git2/revparse.h", "line": 37, "lineto": 38, "args": [ @@ -18898,29 +19251,21 @@ "comments": "

See man gitrevisions, or http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for information on the syntax accepted.

\n\n

The returned object should be released with git_object_free when no longer needed.

\n", "group": "revparse", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_revparse_single-26" - ], - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_revparse_single-34" - ], - "describe.c": [ - "ex/HEAD/describe.html#git_revparse_single-8" - ], - "log.c": [ - "ex/HEAD/log.html#git_revparse_single-48" - ], + "blame.c": ["ex/v1.9.1/blame.html#git_revparse_single-21"], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_revparse_single-30"], + "describe.c": ["ex/v1.9.1/describe.html#git_revparse_single-6"], + "log.c": ["ex/v1.9.1/log.html#git_revparse_single-44"], "tag.c": [ - "ex/HEAD/tag.html#git_revparse_single-13", - "ex/HEAD/tag.html#git_revparse_single-14", - "ex/HEAD/tag.html#git_revparse_single-15", - "ex/HEAD/tag.html#git_revparse_single-16" + "ex/v1.9.1/tag.html#git_revparse_single-9", + "ex/v1.9.1/tag.html#git_revparse_single-10", + "ex/v1.9.1/tag.html#git_revparse_single-11", + "ex/v1.9.1/tag.html#git_revparse_single-12" ] } }, "git_revparse_ext": { "type": "function", - "file": "revparse.h", + "file": "git2/revparse.h", "line": 61, "lineto": 65, "args": [ @@ -18953,11 +19298,12 @@ }, "description": "

Find a single object and intermediate reference by a revision string.

\n", "comments": "

See man gitrevisions, or http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for information on the syntax accepted.

\n\n

In some cases (@{<-n>} or <branchname>@{upstream}), the expression may point to an intermediate reference. When such expressions are being passed in, reference_out will be valued as well.

\n\n

The returned object should be released with git_object_free and the returned reference with git_reference_free when no longer needed.

\n", - "group": "revparse" + "group": "revparse", + "examples": { "commit.c": ["ex/v1.9.1/commit.html#git_revparse_ext-9"] } }, "git_revparse": { "type": "function", - "file": "revparse.h", + "file": "git2/revparse.h", "line": 105, "lineto": 108, "args": [ @@ -18987,21 +19333,17 @@ "comments": "

See man gitrevisions or http://git-scm.com/docs/git-rev-parse.html#_specifying_revisions for information on the syntax accepted.

\n", "group": "revparse", "examples": { - "blame.c": [ - "ex/HEAD/blame.html#git_revparse-27" - ], - "log.c": [ - "ex/HEAD/log.html#git_revparse-49" - ], + "blame.c": ["ex/v1.9.1/blame.html#git_revparse-22"], + "log.c": ["ex/v1.9.1/log.html#git_revparse-45"], "rev-parse.c": [ - "ex/HEAD/rev-parse.html#git_revparse-18", - "ex/HEAD/rev-parse.html#git_revparse-19" + "ex/v1.9.1/rev-parse.html#git_revparse-14", + "ex/v1.9.1/rev-parse.html#git_revparse-15" ] } }, "git_revwalk_new": { "type": "function", - "file": "revwalk.h", + "file": "git2/revwalk.h", "line": 73, "lineto": 73, "args": [ @@ -19018,28 +19360,23 @@ ], "argline": "git_revwalk **out, git_repository *repo", "sig": "git_revwalk **::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Allocate a new revision walker to iterate through a repo.

\n", "comments": "

This revision walker uses a custom memory pool and an internal commit cache, so it is relatively expensive to allocate.

\n\n

For maximum performance, this revision walker should be reused for different walks.

\n\n

This revision walker is not thread safe: it may only be used to walk a repository on a single thread; however, it is possible to have several revision walkers in several different threads walking the same repository.

\n", "group": "revwalk", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_revwalk_new-74" - ], + "general.c": ["ex/v1.9.1/general.html#git_revwalk_new-81"], "log.c": [ - "ex/HEAD/log.html#git_revwalk_new-50", - "ex/HEAD/log.html#git_revwalk_new-51" + "ex/v1.9.1/log.html#git_revwalk_new-46", + "ex/v1.9.1/log.html#git_revwalk_new-47" ] } }, "git_revwalk_reset": { "type": "function", - "file": "revwalk.h", - "line": 88, - "lineto": 88, + "file": "git2/revwalk.h", + "line": 89, + "lineto": 89, "args": [ { "name": "walker", @@ -19049,19 +19386,16 @@ ], "argline": "git_revwalk *walker", "sig": "git_revwalk *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Reset the revision walker for reuse.

\n", "comments": "

This will clear all the pushed and hidden commits, and leave the walker in a blank state (just like at creation) ready to receive new commit pushes and start a new walk.

\n\n

The revision walk is automatically reset when a walk is over.

\n", "group": "revwalk" }, "git_revwalk_push": { "type": "function", - "file": "revwalk.h", - "line": 107, - "lineto": 107, + "file": "git2/revwalk.h", + "line": 108, + "lineto": 108, "args": [ { "name": "walk", @@ -19076,27 +19410,20 @@ ], "argline": "git_revwalk *walk, const git_oid *id", "sig": "git_revwalk *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Add a new root for the traversal

\n", "comments": "

The pushed commit will be marked as one of the roots from which to start the walk. This commit may not be walked if it or a child is hidden.

\n\n

At least one commit must be pushed onto the walker before a walk can be started.

\n\n

The given id must belong to a committish on the walked repository.

\n", "group": "revwalk", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_revwalk_push-75" - ], - "log.c": [ - "ex/HEAD/log.html#git_revwalk_push-52" - ] + "general.c": ["ex/v1.9.1/general.html#git_revwalk_push-82"], + "log.c": ["ex/v1.9.1/log.html#git_revwalk_push-48"] } }, "git_revwalk_push_glob": { "type": "function", - "file": "revwalk.h", - "line": 125, - "lineto": 125, + "file": "git2/revwalk.h", + "line": 126, + "lineto": 126, "args": [ { "name": "walk", @@ -19111,19 +19438,16 @@ ], "argline": "git_revwalk *walk, const char *glob", "sig": "git_revwalk *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Push matching references

\n", "comments": "

The OIDs pointed to by the references that match the given glob pattern will be pushed to the revision walker.

\n\n

A leading 'refs/' is implied if not present as well as a trailing '/*' if the glob lacks '?', '*' or '['.

\n\n

Any references matching this glob which do not point to a committish will be ignored.

\n", "group": "revwalk" }, "git_revwalk_push_head": { "type": "function", - "file": "revwalk.h", - "line": 133, - "lineto": 133, + "file": "git2/revwalk.h", + "line": 134, + "lineto": 134, "args": [ { "name": "walk", @@ -19133,24 +19457,17 @@ ], "argline": "git_revwalk *walk", "sig": "git_revwalk *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Push the repository's HEAD

\n", "comments": "", "group": "revwalk", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_revwalk_push_head-53" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_revwalk_push_head-49"] } }, "git_revwalk_hide": { "type": "function", - "file": "revwalk.h", - "line": 148, - "lineto": 148, + "file": "git2/revwalk.h", + "line": 149, + "lineto": 149, "args": [ { "name": "walk", @@ -19165,24 +19482,17 @@ ], "argline": "git_revwalk *walk, const git_oid *commit_id", "sig": "git_revwalk *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Mark a commit (and its ancestors) uninteresting for the output.

\n", "comments": "

The given id must belong to a committish on the walked repository.

\n\n

The resolved commit and all its parents will be hidden from the output on the revision walk.

\n", "group": "revwalk", - "examples": { - "log.c": [ - "ex/HEAD/log.html#git_revwalk_hide-54" - ] - } + "examples": { "log.c": ["ex/v1.9.1/log.html#git_revwalk_hide-50"] } }, "git_revwalk_hide_glob": { "type": "function", - "file": "revwalk.h", - "line": 167, - "lineto": 167, + "file": "git2/revwalk.h", + "line": 168, + "lineto": 168, "args": [ { "name": "walk", @@ -19197,19 +19507,16 @@ ], "argline": "git_revwalk *walk, const char *glob", "sig": "git_revwalk *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Hide matching references.

\n", "comments": "

The OIDs pointed to by the references that match the given glob pattern and their ancestors will be hidden from the output on the revision walk.

\n\n

A leading 'refs/' is implied if not present as well as a trailing '/*' if the glob lacks '?', '*' or '['.

\n\n

Any references matching this glob which do not point to a committish will be ignored.

\n", "group": "revwalk" }, "git_revwalk_hide_head": { "type": "function", - "file": "revwalk.h", - "line": 175, - "lineto": 175, + "file": "git2/revwalk.h", + "line": 176, + "lineto": 176, "args": [ { "name": "walk", @@ -19219,19 +19526,16 @@ ], "argline": "git_revwalk *walk", "sig": "git_revwalk *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Hide the repository's HEAD

\n", "comments": "", "group": "revwalk" }, "git_revwalk_push_ref": { "type": "function", - "file": "revwalk.h", - "line": 186, - "lineto": 186, + "file": "git2/revwalk.h", + "line": 187, + "lineto": 187, "args": [ { "name": "walk", @@ -19246,19 +19550,16 @@ ], "argline": "git_revwalk *walk, const char *refname", "sig": "git_revwalk *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Push the OID pointed to by a reference

\n", "comments": "

The reference must point to a committish.

\n", "group": "revwalk" }, "git_revwalk_hide_ref": { "type": "function", - "file": "revwalk.h", - "line": 197, - "lineto": 197, + "file": "git2/revwalk.h", + "line": 198, + "lineto": 198, "args": [ { "name": "walk", @@ -19273,19 +19574,16 @@ ], "argline": "git_revwalk *walk, const char *refname", "sig": "git_revwalk *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Hide the OID pointed to by a reference

\n", "comments": "

The reference must point to a committish.

\n", "group": "revwalk" }, "git_revwalk_next": { "type": "function", - "file": "revwalk.h", - "line": 217, - "lineto": 217, + "file": "git2/revwalk.h", + "line": 218, + "lineto": 218, "args": [ { "name": "out", @@ -19308,19 +19606,15 @@ "comments": "

The initial call to this method is not blocking when iterating through a repo with a time-sorting mode.

\n\n

Iterating with Topological or inverted modes makes the initial call blocking to preprocess the commit list, but this block should be mostly unnoticeable on most repositories (topological preprocessing times at 0.3s on the git.git repo).

\n\n

The revision walker is reset when the walk is over.

\n", "group": "revwalk", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_revwalk_next-76" - ], - "log.c": [ - "ex/HEAD/log.html#git_revwalk_next-55" - ] + "general.c": ["ex/v1.9.1/general.html#git_revwalk_next-83"], + "log.c": ["ex/v1.9.1/log.html#git_revwalk_next-51"] } }, "git_revwalk_sorting": { "type": "function", - "file": "revwalk.h", - "line": 228, - "lineto": 228, + "file": "git2/revwalk.h", + "line": 230, + "lineto": 230, "args": [ { "name": "walk", @@ -19335,77 +19629,62 @@ ], "argline": "git_revwalk *walk, unsigned int sort_mode", "sig": "git_revwalk *::unsigned int", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Change the sorting mode when iterating through the\n repository's contents.

\n", "comments": "

Changing the sorting mode resets the walker.

\n", "group": "revwalk", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_revwalk_sorting-77" - ], + "general.c": ["ex/v1.9.1/general.html#git_revwalk_sorting-84"], "log.c": [ - "ex/HEAD/log.html#git_revwalk_sorting-56", - "ex/HEAD/log.html#git_revwalk_sorting-57" + "ex/v1.9.1/log.html#git_revwalk_sorting-52", + "ex/v1.9.1/log.html#git_revwalk_sorting-53" ] } }, "git_revwalk_push_range": { "type": "function", - "file": "revwalk.h", - "line": 243, - "lineto": 243, + "file": "git2/revwalk.h", + "line": 245, + "lineto": 245, "args": [ { "name": "walk", "type": "git_revwalk *", "comment": "the walker being used for the traversal" }, - { - "name": "range", - "type": "const char *", - "comment": "the range" - } + { "name": "range", "type": "const char *", "comment": "the range" } ], "argline": "git_revwalk *walk, const char *range", "sig": "git_revwalk *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Push and hide the respective endpoints of the given range.

\n", "comments": "

The range should be of the form <commit>..<commit> where each <commit> is in the form accepted by 'git_revparse_single'. The left-hand commit will be hidden and the right-hand commit pushed.

\n", "group": "revwalk" }, "git_revwalk_simplify_first_parent": { "type": "function", - "file": "revwalk.h", - "line": 250, - "lineto": 250, + "file": "git2/revwalk.h", + "line": 255, + "lineto": 255, "args": [ { "name": "walk", "type": "git_revwalk *", - "comment": null + "comment": "The revision walker." } ], "argline": "git_revwalk *walk", "sig": "git_revwalk *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Simplify the history by first-parent

\n", "comments": "

No parents other than the first for each commit will be enqueued.

\n", "group": "revwalk" }, "git_revwalk_free": { "type": "function", - "file": "revwalk.h", - "line": 258, - "lineto": 258, + "file": "git2/revwalk.h", + "line": 263, + "lineto": 263, "args": [ { "name": "walk", @@ -19415,3442 +19694,1757 @@ ], "argline": "git_revwalk *walk", "sig": "git_revwalk *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a revision walker previously allocated.

\n", "comments": "", "group": "revwalk", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_revwalk_free-78" - ], - "log.c": [ - "ex/HEAD/log.html#git_revwalk_free-58" - ] - } - }, - "git_revwalk_repository": { - "type": "function", - "file": "revwalk.h", - "line": 267, - "lineto": 267, - "args": [ - { - "name": "walk", - "type": "git_revwalk *", - "comment": "the revision walker" - } - ], - "argline": "git_revwalk *walk", - "sig": "git_revwalk *", - "return": { - "type": "git_repository *", - "comment": " the repository being walked" - }, - "description": "

Return the repository on which this walker\n is operating.

\n", - "comments": "", - "group": "revwalk" - }, - "git_revwalk_add_hide_cb": { - "type": "function", - "file": "revwalk.h", - "line": 288, - "lineto": 291, - "args": [ - { - "name": "walk", - "type": "git_revwalk *", - "comment": "the revision walker" - }, - { - "name": "hide_cb", - "type": "git_revwalk_hide_cb", - "comment": "callback function to hide a commit and its parents" - }, - { - "name": "payload", - "type": "void *", - "comment": "data payload to be passed to callback function" - } - ], - "argline": "git_revwalk *walk, git_revwalk_hide_cb hide_cb, void *payload", - "sig": "git_revwalk *::git_revwalk_hide_cb::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Adds a callback function to hide a commit and its parents

\n", - "comments": "", - "group": "revwalk" - }, - "git_signature_new": { - "type": "function", - "file": "signature.h", - "line": 37, - "lineto": 37, - "args": [ - { - "name": "out", - "type": "git_signature **", - "comment": "new signature, in case of error NULL" - }, - { - "name": "name", - "type": "const char *", - "comment": "name of the person" - }, - { - "name": "email", - "type": "const char *", - "comment": "email of the person" - }, - { - "name": "time", - "type": "git_time_t", - "comment": "time when the action happened" - }, - { - "name": "offset", - "type": "int", - "comment": "timezone offset in minutes for the time" - } - ], - "argline": "git_signature **out, const char *name, const char *email, git_time_t time, int offset", - "sig": "git_signature **::const char *::const char *::git_time_t::int", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create a new action signature.

\n", - "comments": "

Call git_signature_free() to free the data.

\n\n

Note: angle brackets ('<' and '>') characters are not allowed to be used in either the name or the email parameter.

\n", - "group": "signature", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_signature_new-79", - "ex/HEAD/general.html#git_signature_new-80" - ] - } - }, - "git_signature_now": { - "type": "function", - "file": "signature.h", - "line": 49, - "lineto": 49, - "args": [ - { - "name": "out", - "type": "git_signature **", - "comment": "new signature, in case of error NULL" - }, - { - "name": "name", - "type": "const char *", - "comment": "name of the person" - }, - { - "name": "email", - "type": "const char *", - "comment": "email of the person" - } - ], - "argline": "git_signature **out, const char *name, const char *email", - "sig": "git_signature **::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create a new action signature with a timestamp of 'now'.

\n", - "comments": "

Call git_signature_free() to free the data.

\n", - "group": "signature", - "examples": { - "merge.c": [ - "ex/HEAD/merge.html#git_signature_now-45" - ] - } - }, - "git_signature_default": { - "type": "function", - "file": "signature.h", - "line": 63, - "lineto": 63, - "args": [ - { - "name": "out", - "type": "git_signature **", - "comment": "new signature" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "repository pointer" - } - ], - "argline": "git_signature **out, git_repository *repo", - "sig": "git_signature **::git_repository *", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if config is missing, or error code" - }, - "description": "

Create a new action signature with default user and now timestamp.

\n", - "comments": "

This looks up the user.name and user.email from the configuration and uses the current time as the timestamp, and creates a new signature based on that information. It will return GIT_ENOTFOUND if either the user.name or user.email are not set.

\n", - "group": "signature", - "examples": { - "init.c": [ - "ex/HEAD/init.html#git_signature_default-12" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_signature_default-17" - ] - } - }, - "git_signature_from_buffer": { - "type": "function", - "file": "signature.h", - "line": 76, - "lineto": 76, - "args": [ - { - "name": "out", - "type": "git_signature **", - "comment": "new signature" - }, - { - "name": "buf", - "type": "const char *", - "comment": "signature string" - } - ], - "argline": "git_signature **out, const char *buf", - "sig": "git_signature **::const char *", - "return": { - "type": "int", - "comment": " 0 on success, or an error code" - }, - "description": "

Create a new signature by parsing the given buffer, which is\n expected to be in the format "Real Name \n<email

\n\n
\n

timestamp tzoffset",\n where timestamp is the number of seconds since the Unix epoch and\n tzoffset is the timezone offset in hhmm format (note the lack\n of a colon separator).

\n
\n", - "comments": "", - "group": "signature" - }, - "git_signature_dup": { - "type": "function", - "file": "signature.h", - "line": 88, - "lineto": 88, - "args": [ - { - "name": "dest", - "type": "git_signature **", - "comment": "pointer where to store the copy" - }, - { - "name": "sig", - "type": "const git_signature *", - "comment": "signature to duplicate" - } - ], - "argline": "git_signature **dest, const git_signature *sig", - "sig": "git_signature **::const git_signature *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create a copy of an existing signature. All internal strings are also\n duplicated.

\n", - "comments": "

Call git_signature_free() to free the data.

\n", - "group": "signature" - }, - "git_signature_free": { - "type": "function", - "file": "signature.h", - "line": 99, - "lineto": 99, - "args": [ - { - "name": "sig", - "type": "git_signature *", - "comment": "signature to free" - } - ], - "argline": "git_signature *sig", - "sig": "git_signature *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free an existing signature.

\n", - "comments": "

Because the signature is not an opaque structure, it is legal to free it manually, but be sure to free the "name" and "email" strings in addition to the structure itself.

\n", - "group": "signature", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_signature_free-81", - "ex/HEAD/general.html#git_signature_free-82" - ], - "init.c": [ - "ex/HEAD/init.html#git_signature_free-13" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_signature_free-18" - ] - } - }, - "git_stash_apply_init_options": { - "type": "function", - "file": "stash.h", - "line": 153, - "lineto": 154, - "args": [ - { - "name": "opts", - "type": "git_stash_apply_options *", - "comment": "the `git_stash_apply_options` instance to initialize." - }, - { - "name": "version", - "type": "unsigned int", - "comment": "the version of the struct; you should pass\n `GIT_STASH_APPLY_OPTIONS_INIT` here." - } - ], - "argline": "git_stash_apply_options *opts, unsigned int version", - "sig": "git_stash_apply_options *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_stash_apply_options with default values. Equivalent to\n creating an instance with GIT_STASH_APPLY_OPTIONS_INIT.

\n", - "comments": "", - "group": "stash" - }, - "git_stash_apply": { - "type": "function", - "file": "stash.h", - "line": 182, - "lineto": 185, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The owning repository." - }, - { - "name": "index", - "type": "size_t", - "comment": "The position within the stash list. 0 points to the\n most recent stashed state." - }, - { - "name": "options", - "type": "const git_stash_apply_options *", - "comment": "Optional options to control how stashes are applied." - } - ], - "argline": "git_repository *repo, size_t index, const git_stash_apply_options *options", - "sig": "git_repository *::size_t::const git_stash_apply_options *", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the\n given index, GIT_EMERGECONFLICT if changes exist in the working\n directory, or an error code" - }, - "description": "

Apply a single stashed state from the stash list.

\n", - "comments": "

If local changes in the working directory conflict with changes in the stash then GIT_EMERGECONFLICT will be returned. In this case, the index will always remain unmodified and all files in the working directory will remain unmodified. However, if you are restoring untracked files or ignored files and there is a conflict when applying the modified files, then those files will remain in the working directory.

\n\n

If passing the GIT_STASH_APPLY_REINSTATE_INDEX flag and there would be conflicts when reinstating the index, the function will return GIT_EMERGECONFLICT and both the working directory and index will be left unmodified.

\n\n

Note that a minimum checkout strategy of GIT_CHECKOUT_SAFE is implied.

\n", - "group": "stash" - }, - "git_stash_foreach": { - "type": "function", - "file": "stash.h", - "line": 218, - "lineto": 221, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "Repository where to find the stash." - }, - { - "name": "callback", - "type": "git_stash_cb", - "comment": "Callback to invoke per found stashed state. The most\n recent stash state will be enumerated first." - }, - { - "name": "payload", - "type": "void *", - "comment": "Extra parameter to callback function." - } - ], - "argline": "git_repository *repo, git_stash_cb callback, void *payload", - "sig": "git_repository *::git_stash_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code." - }, - "description": "

Loop over all the stashed states and issue a callback for each one.

\n", - "comments": "

If the callback returns a non-zero value, this will stop looping.

\n", - "group": "stash" - }, - "git_stash_drop": { - "type": "function", - "file": "stash.h", - "line": 234, - "lineto": 236, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The owning repository." - }, - { - "name": "index", - "type": "size_t", - "comment": "The position within the stash list. 0 points to the\n most recent stashed state." - } - ], - "argline": "git_repository *repo, size_t index", - "sig": "git_repository *::size_t", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the given\n index, or error code." - }, - "description": "

Remove a single stashed state from the stash list.

\n", - "comments": "", - "group": "stash" - }, - "git_stash_pop": { - "type": "function", - "file": "stash.h", - "line": 250, - "lineto": 253, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The owning repository." - }, - { - "name": "index", - "type": "size_t", - "comment": "The position within the stash list. 0 points to the\n most recent stashed state." - }, - { - "name": "options", - "type": "const git_stash_apply_options *", - "comment": "Optional options to control how stashes are applied." - } - ], - "argline": "git_repository *repo, size_t index, const git_stash_apply_options *options", - "sig": "git_repository *::size_t::const git_stash_apply_options *", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the given\n index, or error code. (see git_stash_apply() above for details)" - }, - "description": "

Apply a single stashed state from the stash list and remove it from the list\n if successful.

\n", - "comments": "", - "group": "stash" - }, - "git_status_init_options": { - "type": "function", - "file": "status.h", - "line": 199, - "lineto": 201, - "args": [ - { - "name": "opts", - "type": "git_status_options *", - "comment": "The `git_status_options` instance to initialize." - }, - { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_STATUS_OPTIONS_VERSION`" - } - ], - "argline": "git_status_options *opts, unsigned int version", - "sig": "git_status_options *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_status_options with default values. Equivalent to\n creating an instance with GIT_STATUS_OPTIONS_INIT.

\n", - "comments": "", - "group": "status" - }, - "git_status_foreach": { - "type": "function", - "file": "status.h", - "line": 239, - "lineto": 242, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "callback", - "type": "git_status_cb", - "comment": "The function to call on each file" - }, - { - "name": "payload", - "type": "void *", - "comment": "Pointer to pass through to callback function" - } - ], - "argline": "git_repository *repo, git_status_cb callback, void *payload", - "sig": "git_repository *::git_status_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" - }, - "description": "

Gather file statuses and run a callback for each one.

\n", - "comments": "

The callback is passed the path of the file, the status (a combination of the git_status_t values above) and the payload data pointer passed into this function.

\n\n

If the callback returns a non-zero value, this function will stop looping and return that value to caller.

\n", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_foreach-10" - ] - } - }, - "git_status_foreach_ext": { - "type": "function", - "file": "status.h", - "line": 263, - "lineto": 267, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "Repository object" - }, - { - "name": "opts", - "type": "const git_status_options *", - "comment": "Status options structure" - }, - { - "name": "callback", - "type": "git_status_cb", - "comment": "The function to call on each file" - }, - { - "name": "payload", - "type": "void *", - "comment": "Pointer to pass through to callback function" - } - ], - "argline": "git_repository *repo, const git_status_options *opts, git_status_cb callback, void *payload", - "sig": "git_repository *::const git_status_options *::git_status_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, non-zero callback return value, or error code" - }, - "description": "

Gather file status information and run callbacks as requested.

\n", - "comments": "

This is an extended version of the git_status_foreach() API that allows for more granular control over which paths will be processed and in what order. See the git_status_options structure for details about the additional controls that this makes available.

\n\n

Note that if a pathspec is given in the git_status_options to filter the status, then the results from rename detection (if you enable it) may not be accurate. To do rename detection properly, this must be called with no pathspec so that all files can be considered.

\n", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_foreach_ext-11" - ] - } - }, - "git_status_file": { - "type": "function", - "file": "status.h", - "line": 295, - "lineto": 298, - "args": [ - { - "name": "status_flags", - "type": "unsigned int *", - "comment": "Output combination of git_status_t values for file" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "path", - "type": "const char *", - "comment": "The exact path to retrieve status for relative to the\n repository working directory" - } - ], - "argline": "unsigned int *status_flags, git_repository *repo, const char *path", - "sig": "unsigned int *::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if the file is not found in the HEAD,\n index, and work tree, GIT_EAMBIGUOUS if `path` matches multiple files\n or if it refers to a folder, and -1 on other errors." - }, - "description": "

Get file status for a single file.

\n", - "comments": "

This tries to get status for the filename that you give. If no files match that name (in either the HEAD, index, or working directory), this returns GIT_ENOTFOUND.

\n\n

If the name matches multiple files (for example, if the path names a directory or if running on a case- insensitive filesystem and yet the HEAD has two entries that both match the path), then this returns GIT_EAMBIGUOUS because it cannot give correct results.

\n\n

This does not do any sort of rename detection. Renames require a set of targets and because of the path filtering, there is not enough information to check renames correctly. To check file status with rename detection, there is no choice but to do a full git_status_list_new and scan through looking for the path that you are interested in.

\n", - "group": "status" - }, - "git_status_list_new": { - "type": "function", - "file": "status.h", - "line": 313, - "lineto": 316, - "args": [ - { - "name": "out", - "type": "git_status_list **", - "comment": "Pointer to store the status results in" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "Repository object" - }, - { - "name": "opts", - "type": "const git_status_options *", - "comment": "Status options structure" - } - ], - "argline": "git_status_list **out, git_repository *repo, const git_status_options *opts", - "sig": "git_status_list **::git_repository *::const git_status_options *", - "return": { - "type": "int", - "comment": " 0 on success or error code" - }, - "description": "

Gather file status information and populate the git_status_list.

\n", - "comments": "

Note that if a pathspec is given in the git_status_options to filter the status, then the results from rename detection (if you enable it) may not be accurate. To do rename detection properly, this must be called with no pathspec so that all files can be considered.

\n", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_list_new-12", - "ex/HEAD/status.html#git_status_list_new-13" - ] - } - }, - "git_status_list_entrycount": { - "type": "function", - "file": "status.h", - "line": 327, - "lineto": 328, - "args": [ - { - "name": "statuslist", - "type": "git_status_list *", - "comment": "Existing status list object" - } - ], - "argline": "git_status_list *statuslist", - "sig": "git_status_list *", - "return": { - "type": "size_t", - "comment": " the number of status entries" - }, - "description": "

Gets the count of status entries in this list.

\n", - "comments": "

If there are no changes in status (at least according the options given when the status list was created), this can return 0.

\n", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_list_entrycount-14", - "ex/HEAD/status.html#git_status_list_entrycount-15" - ] - } - }, - "git_status_byindex": { - "type": "function", - "file": "status.h", - "line": 339, - "lineto": 341, - "args": [ - { - "name": "statuslist", - "type": "git_status_list *", - "comment": "Existing status list object" - }, - { - "name": "idx", - "type": "size_t", - "comment": "Position of the entry" - } - ], - "argline": "git_status_list *statuslist, size_t idx", - "sig": "git_status_list *::size_t", - "return": { - "type": "const git_status_entry *", - "comment": " Pointer to the entry; NULL if out of bounds" - }, - "description": "

Get a pointer to one of the entries in the status list.

\n", - "comments": "

The entry is not modifiable and should not be freed.

\n", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_byindex-16", - "ex/HEAD/status.html#git_status_byindex-17", - "ex/HEAD/status.html#git_status_byindex-18", - "ex/HEAD/status.html#git_status_byindex-19", - "ex/HEAD/status.html#git_status_byindex-20", - "ex/HEAD/status.html#git_status_byindex-21" - ] - } - }, - "git_status_list_free": { - "type": "function", - "file": "status.h", - "line": 348, - "lineto": 349, - "args": [ - { - "name": "statuslist", - "type": "git_status_list *", - "comment": "Existing status list object" - } - ], - "argline": "git_status_list *statuslist", - "sig": "git_status_list *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free an existing status list

\n", - "comments": "", - "group": "status", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_status_list_free-22" - ] - } - }, - "git_status_should_ignore": { - "type": "function", - "file": "status.h", - "line": 367, - "lineto": 370, - "args": [ - { - "name": "ignored", - "type": "int *", - "comment": "Boolean returning 0 if the file is not ignored, 1 if it is" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "path", - "type": "const char *", - "comment": "The file to check ignores for, rooted at the repo's workdir." - } - ], - "argline": "int *ignored, git_repository *repo, const char *path", - "sig": "int *::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 if ignore rules could be processed for the file (regardless\n of whether it exists or not), or an error \n<\n 0 if they could not." - }, - "description": "

Test if the ignore rules apply to a given file.

\n", - "comments": "

This function checks the ignore rules to see if they would apply to the given file. This indicates if the file would be ignored regardless of whether the file is already in the index or committed to the repository.

\n\n

One way to think of this is if you were to do "git add ." on the directory containing the file, would it be added or not?

\n", - "group": "status" - }, - "git_strarray_free": { - "type": "function", - "file": "strarray.h", - "line": 41, - "lineto": 41, - "args": [ - { - "name": "array", - "type": "git_strarray *", - "comment": "git_strarray from which to free string data" - } - ], - "argline": "git_strarray *array", - "sig": "git_strarray *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Close a string array object

\n", - "comments": "

This method should be called on git_strarray objects where the strings array is allocated and contains allocated strings, such as what you would get from git_strarray_copy(). Not doing so, will result in a memory leak.

\n\n

This does not free the git_strarray itself, since the library will never allocate that object directly itself (it is more commonly embedded inside another struct or created on the stack).

\n", - "group": "strarray", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_strarray_free-83" - ], - "remote.c": [ - "ex/HEAD/remote.html#git_strarray_free-16", - "ex/HEAD/remote.html#git_strarray_free-17" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_strarray_free-19" - ] - } - }, - "git_strarray_copy": { - "type": "function", - "file": "strarray.h", - "line": 53, - "lineto": 53, - "args": [ - { - "name": "tgt", - "type": "git_strarray *", - "comment": "target" - }, - { - "name": "src", - "type": "const git_strarray *", - "comment": "source" - } - ], - "argline": "git_strarray *tgt, const git_strarray *src", - "sig": "git_strarray *::const git_strarray *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n 0 on allocation failure" - }, - "description": "

Copy a string array object from source to target.

\n", - "comments": "

Note: target is overwritten and hence should be empty, otherwise its contents are leaked. Call git_strarray_free() if necessary.

\n", - "group": "strarray" - }, - "git_submodule_update_init_options": { - "type": "function", - "file": "submodule.h", - "line": 170, - "lineto": 171, - "args": [ - { - "name": "opts", - "type": "git_submodule_update_options *", - "comment": "The `git_submodule_update_options` instance to initialize." - }, - { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_SUBMODULE_UPDATE_OPTIONS_VERSION`" - } - ], - "argline": "git_submodule_update_options *opts, unsigned int version", - "sig": "git_submodule_update_options *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_submodule_update_options with default values.\n Equivalent to creating an instance with GIT_SUBMODULE_UPDATE_OPTIONS_INIT.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_update": { - "type": "function", - "file": "submodule.h", - "line": 191, - "lineto": 191, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Submodule object" - }, - { - "name": "init", - "type": "int", - "comment": "If the submodule is not initialized, setting this flag to true\n will initialize the submodule before updating. Otherwise, this will\n return an error if attempting to update an uninitialzed repository.\n but setting this to true forces them to be updated." - }, - { - "name": "options", - "type": "git_submodule_update_options *", - "comment": "configuration options for the update. If NULL, the\n function works as though GIT_SUBMODULE_UPDATE_OPTIONS_INIT was passed." - } - ], - "argline": "git_submodule *submodule, int init, git_submodule_update_options *options", - "sig": "git_submodule *::int::git_submodule_update_options *", - "return": { - "type": "int", - "comment": " 0 on success, any non-zero return value from a callback\n function, or a negative value to indicate an error (use\n `giterr_last` for a detailed error message)." - }, - "description": "

Update a submodule. This will clone a missing submodule and\n checkout the subrepository to the commit specified in the index of\n the containing repository. If the submodule repository doesn't contain\n the target commit (e.g. because fetchRecurseSubmodules isn't set), then\n the submodule is fetched using the fetch options supplied in options.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_lookup": { - "type": "function", - "file": "submodule.h", - "line": 220, - "lineto": 223, - "args": [ - { - "name": "out", - "type": "git_submodule **", - "comment": "Output ptr to submodule; pass NULL to just get return code" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The parent repository" - }, - { - "name": "name", - "type": "const char *", - "comment": "The name of or path to the submodule; trailing slashes okay" - } - ], - "argline": "git_submodule **out, git_repository *repo, const char *name", - "sig": "git_submodule **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, GIT_ENOTFOUND if submodule does not exist,\n GIT_EEXISTS if a repository is found in working directory only,\n -1 on other errors." - }, - "description": "

Lookup submodule information by name or path.

\n", - "comments": "

Given either the submodule name or path (they are usually the same), this returns a structure describing the submodule.

\n\n

There are two expected error scenarios:

\n\n\n\n

You must call git_submodule_free when done with the submodule.

\n", - "group": "submodule" - }, - "git_submodule_free": { - "type": "function", - "file": "submodule.h", - "line": 230, - "lineto": 230, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Release a submodule

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_foreach": { - "type": "function", - "file": "submodule.h", - "line": 250, - "lineto": 253, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository" - }, - { - "name": "callback", - "type": "git_submodule_cb", - "comment": "Function to be called with the name of each submodule.\n Return a non-zero value to terminate the iteration." - }, - { - "name": "payload", - "type": "void *", - "comment": "Extra data to pass to callback" - } - ], - "argline": "git_repository *repo, git_submodule_cb callback, void *payload", - "sig": "git_repository *::git_submodule_cb::void *", - "return": { - "type": "int", - "comment": " 0 on success, -1 on error, or non-zero return value of callback" - }, - "description": "

Iterate over all tracked submodules of a repository.

\n", - "comments": "

See the note on git_submodule above. This iterates over the tracked submodules as described therein.

\n\n

If you are concerned about items in the working directory that look like submodules but are not tracked, the diff API will generate a diff record for workdir items that look like submodules but are not tracked, showing them as added in the workdir. Also, the status API will treat the entire subdirectory of a contained git repo as a single GIT_STATUS_WT_NEW item.

\n", - "group": "submodule", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_submodule_foreach-23" - ] - } - }, - "git_submodule_add_setup": { - "type": "function", - "file": "submodule.h", - "line": 280, - "lineto": 285, - "args": [ - { - "name": "out", - "type": "git_submodule **", - "comment": "The newly created submodule ready to open for clone" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "The repository in which you want to create the submodule" - }, - { - "name": "url", - "type": "const char *", - "comment": "URL for the submodule's remote" - }, - { - "name": "path", - "type": "const char *", - "comment": "Path at which the submodule should be created" - }, - { - "name": "use_gitlink", - "type": "int", - "comment": "Should workdir contain a gitlink to the repo in\n .git/modules vs. repo directly in workdir." - } - ], - "argline": "git_submodule **out, git_repository *repo, const char *url, const char *path, int use_gitlink", - "sig": "git_submodule **::git_repository *::const char *::const char *::int", - "return": { - "type": "int", - "comment": " 0 on success, GIT_EEXISTS if submodule already exists,\n -1 on other errors." - }, - "description": "

Set up a new git submodule for checkout.

\n", - "comments": "

This does "git submodule add" up to the fetch and checkout of the submodule contents. It preps a new submodule, creates an entry in .gitmodules and creates an empty initialized repository either at the given path in the working directory or in .git/modules with a gitlink from the working directory to the new repo.

\n\n

To fully emulate "git submodule add" call this function, then open the submodule repo and perform the clone step as needed. Lastly, call git_submodule_add_finalize() to wrap up adding the new submodule and .gitmodules to the index to be ready to commit.

\n\n

You must call git_submodule_free on the submodule object when done.

\n", - "group": "submodule" - }, - "git_submodule_add_finalize": { - "type": "function", - "file": "submodule.h", - "line": 297, - "lineto": 297, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to finish adding." - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Resolve the setup of a new git submodule.

\n", - "comments": "

This should be called on a submodule once you have called add setup and done the clone of the submodule. This adds the .gitmodules file and the newly cloned submodule to the index to be ready to be committed (but doesn't actually do the commit).

\n", - "group": "submodule" - }, - "git_submodule_add_to_index": { - "type": "function", - "file": "submodule.h", - "line": 309, - "lineto": 311, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to add to the index" - }, - { - "name": "write_index", - "type": "int", - "comment": "Boolean if this should immediately write the index\n file. If you pass this as false, you will have to get the\n git_index and explicitly call `git_index_write()` on it to\n save the change." - } - ], - "argline": "git_submodule *submodule, int write_index", - "sig": "git_submodule *::int", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure" - }, - "description": "

Add current submodule HEAD commit to index of superproject.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_owner": { - "type": "function", - "file": "submodule.h", - "line": 324, - "lineto": 324, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "git_repository *", - "comment": " Pointer to `git_repository`" - }, - "description": "

Get the containing repository for a submodule.

\n", - "comments": "

This returns a pointer to the repository that contains the submodule. This is a just a reference to the repository that was passed to the original git_submodule_lookup() call, so if that repository has been freed, then this may be a dangling reference.

\n", - "group": "submodule" - }, - "git_submodule_name": { - "type": "function", - "file": "submodule.h", - "line": 332, - "lineto": 332, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const char *", - "comment": " Pointer to the submodule name" - }, - "description": "

Get the name of submodule.

\n", - "comments": "", - "group": "submodule", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_submodule_name-24" - ] - } - }, - "git_submodule_path": { - "type": "function", - "file": "submodule.h", - "line": 343, - "lineto": 343, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const char *", - "comment": " Pointer to the submodule path" - }, - "description": "

Get the path to the submodule.

\n", - "comments": "

The path is almost always the same as the submodule name, but the two are actually not required to match.

\n", - "group": "submodule", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_submodule_path-25" - ] - } - }, - "git_submodule_url": { - "type": "function", - "file": "submodule.h", - "line": 351, - "lineto": 351, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const char *", - "comment": " Pointer to the submodule url" - }, - "description": "

Get the URL for the submodule.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_resolve_url": { - "type": "function", - "file": "submodule.h", - "line": 361, - "lineto": 361, - "args": [ - { - "name": "out", - "type": "git_buf *", - "comment": "buffer to store the absolute submodule url in" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "Pointer to repository object" - }, - { - "name": "url", - "type": "const char *", - "comment": "Relative url" - } - ], - "argline": "git_buf *out, git_repository *repo, const char *url", - "sig": "git_buf *::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Resolve a submodule url relative to the given repository.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_branch": { - "type": "function", - "file": "submodule.h", - "line": 369, - "lineto": 369, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const char *", - "comment": " Pointer to the submodule branch" - }, - "description": "

Get the branch for the submodule.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_set_branch": { - "type": "function", - "file": "submodule.h", - "line": 382, - "lineto": 382, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to affect" - }, - { - "name": "name", - "type": "const char *", - "comment": "the name of the submodule to configure" - }, - { - "name": "branch", - "type": "const char *", - "comment": "Branch that should be used for the submodule" - } - ], - "argline": "git_repository *repo, const char *name, const char *branch", - "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure" - }, - "description": "

Set the branch for the submodule in the configuration

\n", - "comments": "

After calling this, you may wish to call git_submodule_sync() to write the changes to the checked out submodule repository.

\n", - "group": "submodule" - }, - "git_submodule_set_url": { - "type": "function", - "file": "submodule.h", - "line": 396, - "lineto": 396, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to affect" - }, - { - "name": "name", - "type": "const char *", - "comment": "the name of the submodule to configure" - }, - { - "name": "url", - "type": "const char *", - "comment": "URL that should be used for the submodule" - } - ], - "argline": "git_repository *repo, const char *name, const char *url", - "sig": "git_repository *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure" - }, - "description": "

Set the URL for the submodule in the configuration

\n", - "comments": "

After calling this, you may wish to call git_submodule_sync() to write the changes to the checked out submodule repository.

\n", - "group": "submodule" - }, - "git_submodule_index_id": { - "type": "function", - "file": "submodule.h", - "line": 404, - "lineto": 404, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const git_oid *", - "comment": " Pointer to git_oid or NULL if submodule is not in index." - }, - "description": "

Get the OID for the submodule in the index.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_head_id": { - "type": "function", - "file": "submodule.h", - "line": 412, - "lineto": 412, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const git_oid *", - "comment": " Pointer to git_oid or NULL if submodule is not in the HEAD." - }, - "description": "

Get the OID for the submodule in the current HEAD tree.

\n", - "comments": "", - "group": "submodule" - }, - "git_submodule_wd_id": { - "type": "function", - "file": "submodule.h", - "line": 425, - "lineto": 425, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Pointer to submodule object" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "const git_oid *", - "comment": " Pointer to git_oid or NULL if submodule is not checked out." - }, - "description": "

Get the OID for the submodule in the current working directory.

\n", - "comments": "

This returns the OID that corresponds to looking up 'HEAD' in the checked out submodule. If there are pending changes in the index or anything else, this won't notice that. You should call git_submodule_status() for a more complete picture about the state of the working directory.

\n", - "group": "submodule" - }, - "git_submodule_ignore": { - "type": "function", - "file": "submodule.h", - "line": 450, - "lineto": 451, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to check" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "git_submodule_ignore_t", - "comment": " The current git_submodule_ignore_t valyue what will be used for\n this submodule." - }, - "description": "

Get the ignore rule that will be used for the submodule.

\n", - "comments": "

These values control the behavior of git_submodule_status() for this submodule. There are four ignore values:

\n\n\n", - "group": "submodule" - }, - "git_submodule_set_ignore": { - "type": "function", - "file": "submodule.h", - "line": 463, - "lineto": 466, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to affect" - }, - { - "name": "name", - "type": "const char *", - "comment": "the name of the submdule" - }, - { - "name": "ignore", - "type": "git_submodule_ignore_t", - "comment": "The new value for the ignore rule" - } - ], - "argline": "git_repository *repo, const char *name, git_submodule_ignore_t ignore", - "sig": "git_repository *::const char *::git_submodule_ignore_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Set the ignore rule for the submodule in the configuration

\n", - "comments": "

This does not affect any currently-loaded instances.

\n", - "group": "submodule" - }, - "git_submodule_update_strategy": { - "type": "function", - "file": "submodule.h", - "line": 478, - "lineto": 479, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to check" - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "git_submodule_update_t", - "comment": " The current git_submodule_update_t value that will be used\n for this submodule." - }, - "description": "

Get the update rule that will be used for the submodule.

\n", - "comments": "

This value controls the behavior of the git submodule update command. There are four useful values documented with git_submodule_update_t.

\n", - "group": "submodule" - }, - "git_submodule_set_update": { - "type": "function", - "file": "submodule.h", - "line": 491, - "lineto": 494, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to affect" - }, - { - "name": "name", - "type": "const char *", - "comment": "the name of the submodule to configure" - }, - { - "name": "update", - "type": "git_submodule_update_t", - "comment": "The new value to use" - } - ], - "argline": "git_repository *repo, const char *name, git_submodule_update_t update", - "sig": "git_repository *::const char *::git_submodule_update_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Set the update rule for the submodule in the configuration

\n", - "comments": "

This setting won't affect any existing instances.

\n", - "group": "submodule" - }, - "git_submodule_fetch_recurse_submodules": { - "type": "function", - "file": "submodule.h", - "line": 507, - "lineto": 508, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": null - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "git_submodule_recurse_t", - "comment": " 0 if fetchRecurseSubmodules is false, 1 if true" - }, - "description": "

Read the fetchRecurseSubmodules rule for a submodule.

\n", - "comments": "

This accesses the submodule..fetchRecurseSubmodules value for the submodule that controls fetching behavior for the submodule.

\n\n

Note that at this time, libgit2 does not honor this setting and the fetch functionality current ignores submodules.

\n", - "group": "submodule" - }, - "git_submodule_set_fetch_recurse_submodules": { - "type": "function", - "file": "submodule.h", - "line": 520, - "lineto": 523, - "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository to affect" - }, - { - "name": "name", - "type": "const char *", - "comment": "the submodule to configure" - }, - { - "name": "fetch_recurse_submodules", - "type": "git_submodule_recurse_t", - "comment": "Boolean value" - } - ], - "argline": "git_repository *repo, const char *name, git_submodule_recurse_t fetch_recurse_submodules", - "sig": "git_repository *::const char *::git_submodule_recurse_t", - "return": { - "type": "int", - "comment": " old value for fetchRecurseSubmodules" - }, - "description": "

Set the fetchRecurseSubmodules rule for a submodule in the configuration

\n", - "comments": "

This setting won't affect any existing instances.

\n", - "group": "submodule" - }, - "git_submodule_init": { - "type": "function", - "file": "submodule.h", - "line": 538, - "lineto": 538, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to write into the superproject config" - }, - { - "name": "overwrite", - "type": "int", - "comment": "By default, existing entries will not be overwritten,\n but setting this to true forces them to be updated." - } - ], - "argline": "git_submodule *submodule, int overwrite", - "sig": "git_submodule *::int", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure." - }, - "description": "

Copy submodule info into ".git/config" file.

\n", - "comments": "

Just like "git submodule init", this copies information about the submodule into ".git/config". You can use the accessor functions above to alter the in-memory git_submodule object and control what is written to the config, overriding what is in .gitmodules.

\n", - "group": "submodule" - }, - "git_submodule_repo_init": { - "type": "function", - "file": "submodule.h", - "line": 553, - "lineto": 556, - "args": [ - { - "name": "out", - "type": "git_repository **", - "comment": "Output pointer to the created git repository." - }, - { - "name": "sm", - "type": "const git_submodule *", - "comment": "The submodule to create a new subrepository from." - }, - { - "name": "use_gitlink", - "type": "int", - "comment": "Should the workdir contain a gitlink to\n the repo in .git/modules vs. repo directly in workdir." - } - ], - "argline": "git_repository **out, const git_submodule *sm, int use_gitlink", - "sig": "git_repository **::const git_submodule *::int", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure." - }, - "description": "

Set up the subrepository for a submodule in preparation for clone.

\n", - "comments": "

This function can be called to init and set up a submodule repository from a submodule in preparation to clone it from its remote.

\n", - "group": "submodule" - }, - "git_submodule_sync": { - "type": "function", - "file": "submodule.h", - "line": 566, - "lineto": 566, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": null - } - ], - "argline": "git_submodule *submodule", - "sig": "git_submodule *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Copy submodule remote info into submodule repo.

\n", - "comments": "

This copies the information about the submodules URL into the checked out submodule config, acting like "git submodule sync". This is useful if you have altered the URL for the submodule (or it has been altered by a fetch of upstream changes) and you need to update your local repo.

\n", - "group": "submodule" - }, - "git_submodule_open": { - "type": "function", - "file": "submodule.h", - "line": 580, - "lineto": 582, - "args": [ - { - "name": "repo", - "type": "git_repository **", - "comment": "Pointer to the submodule repo which was opened" - }, - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Submodule to be opened" - } - ], - "argline": "git_repository **repo, git_submodule *submodule", - "sig": "git_repository **::git_submodule *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 if submodule repo could not be opened." - }, - "description": "

Open the repository for a submodule.

\n", - "comments": "

This is a newly opened repository object. The caller is responsible for calling git_repository_free() on it when done. Multiple calls to this function will return distinct git_repository objects. This will only work if the submodule is checked out into the working directory.

\n", - "group": "submodule" - }, - "git_submodule_reload": { - "type": "function", - "file": "submodule.h", - "line": 594, - "lineto": 594, - "args": [ - { - "name": "submodule", - "type": "git_submodule *", - "comment": "The submodule to reload" - }, - { - "name": "force", - "type": "int", - "comment": "Force reload even if the data doesn't seem out of date" - } - ], - "argline": "git_submodule *submodule, int force", - "sig": "git_submodule *::int", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on error" - }, - "description": "

Reread submodule info from config, index, and HEAD.

\n", - "comments": "

Call this to reread cached submodule information for this submodule if you have reason to believe that it has changed.

\n", - "group": "submodule" - }, - "git_submodule_status": { - "type": "function", - "file": "submodule.h", - "line": 610, - "lineto": 614, - "args": [ - { - "name": "status", - "type": "unsigned int *", - "comment": "Combination of `GIT_SUBMODULE_STATUS` flags" - }, - { - "name": "repo", - "type": "git_repository *", - "comment": "the repository in which to look" - }, - { - "name": "name", - "type": "const char *", - "comment": "name of the submodule" - }, - { - "name": "ignore", - "type": "git_submodule_ignore_t", - "comment": "the ignore rules to follow" - } - ], - "argline": "unsigned int *status, git_repository *repo, const char *name, git_submodule_ignore_t ignore", - "sig": "unsigned int *::git_repository *::const char *::git_submodule_ignore_t", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on error" - }, - "description": "

Get the status for a submodule.

\n", - "comments": "

This looks at a submodule and tries to determine the status. It will return a combination of the GIT_SUBMODULE_STATUS values above. How deeply it examines the working directory to do this will depend on the git_submodule_ignore_t value for the submodule.

\n", - "group": "submodule", - "examples": { - "status.c": [ - "ex/HEAD/status.html#git_submodule_status-26" - ] - } - }, - "git_submodule_location": { - "type": "function", - "file": "submodule.h", - "line": 630, - "lineto": 632, - "args": [ - { - "name": "location_status", - "type": "unsigned int *", - "comment": "Combination of first four `GIT_SUBMODULE_STATUS` flags" - }, - { - "name": "submodule", - "type": "git_submodule *", - "comment": "Submodule for which to get status" - } - ], - "argline": "unsigned int *location_status, git_submodule *submodule", - "sig": "unsigned int *::git_submodule *", - "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on error" - }, - "description": "

Get the locations of submodule information.

\n", - "comments": "

This is a bit like a very lightweight version of git_submodule_status. It just returns a made of the first four submodule status values (i.e. the ones like GIT_SUBMODULE_STATUS_IN_HEAD, etc) that tell you where the submodule data comes from (i.e. the HEAD commit, gitmodules file, etc.). This can be useful if you want to know if the submodule is present in the working directory at this point in time, etc.

\n", - "group": "submodule" - }, - "git_commit_create_from_ids": { - "type": "function", - "file": "sys/commit.h", - "line": 34, - "lineto": 44, - "args": [ - { - "name": "id", - "type": "git_oid *", - "comment": null - }, - { - "name": "repo", - "type": "git_repository *", - "comment": null - }, - { - "name": "update_ref", - "type": "const char *", - "comment": null - }, - { - "name": "author", - "type": "const git_signature *", - "comment": null - }, - { - "name": "committer", - "type": "const git_signature *", - "comment": null - }, - { - "name": "message_encoding", - "type": "const char *", - "comment": null - }, - { - "name": "message", - "type": "const char *", - "comment": null - }, - { - "name": "tree", - "type": "const git_oid *", - "comment": null - }, - { - "name": "parent_count", - "type": "size_t", - "comment": null - }, - { - "name": "parents", - "type": "const git_oid *[]", - "comment": null - } - ], - "argline": "git_oid *id, git_repository *repo, const char *update_ref, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_oid *tree, size_t parent_count, const git_oid *[] parents", - "sig": "git_oid *::git_repository *::const char *::const git_signature *::const git_signature *::const char *::const char *::const git_oid *::size_t::const git_oid *[]", - "return": { - "type": "int", - "comment": null - }, - "description": "

Create new commit in the repository from a list of git_oid values.

\n", - "comments": "

See documentation for git_commit_create() for information about the parameters, as the meaning is identical excepting that tree and parents now take git_oid. This is a dangerous API in that nor the tree, neither the parents list of git_oids are checked for validity.

\n", - "group": "commit" - }, - "git_commit_create_from_callback": { - "type": "function", - "file": "sys/commit.h", - "line": 66, - "lineto": 76, - "args": [ - { - "name": "id", - "type": "git_oid *", - "comment": null - }, - { - "name": "repo", - "type": "git_repository *", - "comment": null - }, - { - "name": "update_ref", - "type": "const char *", - "comment": null - }, - { - "name": "author", - "type": "const git_signature *", - "comment": null - }, - { - "name": "committer", - "type": "const git_signature *", - "comment": null - }, - { - "name": "message_encoding", - "type": "const char *", - "comment": null - }, - { - "name": "message", - "type": "const char *", - "comment": null - }, - { - "name": "tree", - "type": "const git_oid *", - "comment": null - }, - { - "name": "parent_cb", - "type": "git_commit_parent_callback", - "comment": null - }, + "general.c": ["ex/v1.9.1/general.html#git_revwalk_free-85"], + "log.c": ["ex/v1.9.1/log.html#git_revwalk_free-54"] + } + }, + "git_revwalk_repository": { + "type": "function", + "file": "git2/revwalk.h", + "line": 272, + "lineto": 272, + "args": [ { - "name": "parent_payload", - "type": "void *", - "comment": null + "name": "walk", + "type": "git_revwalk *", + "comment": "the revision walker" } ], - "argline": "git_oid *id, git_repository *repo, const char *update_ref, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_oid *tree, git_commit_parent_callback parent_cb, void *parent_payload", - "sig": "git_oid *::git_repository *::const char *::const git_signature *::const git_signature *::const char *::const char *::const git_oid *::git_commit_parent_callback::void *", + "argline": "git_revwalk *walk", + "sig": "git_revwalk *", "return": { - "type": "int", - "comment": null + "type": "git_repository *", + "comment": " the repository being walked" }, - "description": "

Create a new commit in the repository with an callback to supply parents.

\n", - "comments": "

See documentation for git_commit_create() for information about the parameters, as the meaning is identical excepting that tree takes a git_oid and doesn't check for validity, and parent_cb is invoked with parent_payload and should return git_oid values or NULL to indicate that all parents are accounted for.

\n", - "group": "commit" + "description": "

Return the repository on which this walker\n is operating.

\n", + "comments": "", + "group": "revwalk" }, - "git_config_init_backend": { + "git_revwalk_add_hide_cb": { "type": "function", - "file": "sys/config.h", - "line": 97, - "lineto": 99, + "file": "git2/revwalk.h", + "line": 295, + "lineto": 298, "args": [ { - "name": "backend", - "type": "git_config_backend *", - "comment": "the `git_config_backend` struct to initialize." + "name": "walk", + "type": "git_revwalk *", + "comment": "the revision walker" }, { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_CONFIG_BACKEND_VERSION`" + "name": "hide_cb", + "type": "git_revwalk_hide_cb", + "comment": "callback function to hide a commit and its parents" + }, + { + "name": "payload", + "type": "void *", + "comment": "data payload to be passed to callback function" } ], - "argline": "git_config_backend *backend, unsigned int version", - "sig": "git_config_backend *::unsigned int", - "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." - }, - "description": "

Initializes a git_config_backend with default values. Equivalent to\n creating an instance with GIT_CONFIG_BACKEND_INIT.

\n", + "argline": "git_revwalk *walk, git_revwalk_hide_cb hide_cb, void *payload", + "sig": "git_revwalk *::git_revwalk_hide_cb::void *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Adds, changes or removes a callback function to hide a commit and its parents

\n", "comments": "", - "group": "config" + "group": "revwalk" }, - "git_config_add_backend": { + "git_signature_new": { "type": "function", - "file": "sys/config.h", - "line": 121, - "lineto": 126, + "file": "git2/signature.h", + "line": 41, + "lineto": 41, "args": [ { - "name": "cfg", - "type": "git_config *", - "comment": "the configuration to add the file to" + "name": "out", + "type": "git_signature **", + "comment": "new signature, in case of error NULL" }, { - "name": "file", - "type": "git_config_backend *", - "comment": "the configuration file (backend) to add" + "name": "name", + "type": "const char *", + "comment": "name of the person" }, { - "name": "level", - "type": "git_config_level_t", - "comment": "the priority level of the backend" + "name": "email", + "type": "const char *", + "comment": "email of the person" }, { - "name": "repo", - "type": "const git_repository *", - "comment": "optional repository to allow parsing of\n conditional includes" + "name": "time", + "type": "git_time_t", + "comment": "time (in seconds from epoch) when the action happened" }, { - "name": "force", + "name": "offset", "type": "int", - "comment": "if a config file already exists for the given\n priority level, replace it" + "comment": "timezone offset (in minutes) for the time" } ], - "argline": "git_config *cfg, git_config_backend *file, git_config_level_t level, const git_repository *repo, int force", - "sig": "git_config *::git_config_backend *::git_config_level_t::const git_repository *::int", - "return": { - "type": "int", - "comment": " 0 on success, GIT_EEXISTS when adding more than one file\n for a given priority level (and force_replace set to 0), or error code" - }, - "description": "

Add a generic config file instance to an existing config

\n", - "comments": "

Note that the configuration object will free the file automatically.

\n\n

Further queries on this config object will access each of the config file instances in order (instances with a higher priority level will be accessed first).

\n", - "group": "config" + "argline": "git_signature **out, const char *name, const char *email, git_time_t time, int offset", + "sig": "git_signature **::const char *::const char *::git_time_t::int", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a new action signature.

\n", + "comments": "

Call git_signature_free() to free the data.

\n\n

Note: angle brackets ('<' and '>') characters are not allowed to be used in either the name or the email parameter.

\n", + "group": "signature", + "examples": { + "general.c": [ + "ex/v1.9.1/general.html#git_signature_new-86", + "ex/v1.9.1/general.html#git_signature_new-87" + ] + } }, - "git_diff_print_callback__to_buf": { + "git_signature_now": { "type": "function", - "file": "sys/diff.h", - "line": 37, - "lineto": 41, + "file": "git2/signature.h", + "line": 53, + "lineto": 53, "args": [ { - "name": "delta", - "type": "const git_diff_delta *", - "comment": null - }, - { - "name": "hunk", - "type": "const git_diff_hunk *", - "comment": null + "name": "out", + "type": "git_signature **", + "comment": "new signature, in case of error NULL" }, { - "name": "line", - "type": "const git_diff_line *", - "comment": null + "name": "name", + "type": "const char *", + "comment": "name of the person" }, { - "name": "payload", - "type": "void *", - "comment": null + "name": "email", + "type": "const char *", + "comment": "email of the person" } ], - "argline": "const git_diff_delta *delta, const git_diff_hunk *hunk, const git_diff_line *line, void *payload", - "sig": "const git_diff_delta *::const git_diff_hunk *::const git_diff_line *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Diff print callback that writes to a git_buf.

\n", - "comments": "

This function is provided not for you to call it directly, but instead so you can use it as a function pointer to the git_diff_print or git_patch_print APIs. When using those APIs, you specify a callback to actually handle the diff and/or patch data.

\n\n

Use this callback to easily write that data to a git_buf buffer. You must pass a git_buf * value as the payload to the git_diff_print and/or git_patch_print function. The data will be appended to the buffer (after any existing content).

\n", - "group": "diff" + "argline": "git_signature **out, const char *name, const char *email", + "sig": "git_signature **::const char *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a new action signature with a timestamp of 'now'.

\n", + "comments": "

Call git_signature_free() to free the data.

\n", + "group": "signature", + "examples": { "merge.c": ["ex/v1.9.1/merge.html#git_signature_now-36"] } }, - "git_diff_print_callback__to_file_handle": { + "git_signature_default_from_env": { "type": "function", - "file": "sys/diff.h", - "line": 57, - "lineto": 61, + "file": "git2/signature.h", + "line": 86, + "lineto": 89, "args": [ { - "name": "delta", - "type": "const git_diff_delta *", - "comment": null - }, - { - "name": "hunk", - "type": "const git_diff_hunk *", - "comment": null + "name": "author_out", + "type": "git_signature **", + "comment": "pointer to set the author signature, or NULL" }, { - "name": "line", - "type": "const git_diff_line *", - "comment": null + "name": "committer_out", + "type": "git_signature **", + "comment": "pointer to set the committer signature, or NULL" }, { - "name": "payload", - "type": "void *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "repository pointer" } ], - "argline": "const git_diff_delta *delta, const git_diff_hunk *hunk, const git_diff_line *line, void *payload", - "sig": "const git_diff_delta *::const git_diff_hunk *::const git_diff_line *::void *", + "argline": "git_signature **author_out, git_signature **committer_out, git_repository *repo", + "sig": "git_signature **::git_signature **::git_repository *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, GIT_ENOTFOUND if config is missing, or error code" }, - "description": "

Diff print callback that writes to stdio FILE handle.

\n", - "comments": "

This function is provided not for you to call it directly, but instead so you can use it as a function pointer to the git_diff_print or git_patch_print APIs. When using those APIs, you specify a callback to actually handle the diff and/or patch data.

\n\n

Use this callback to easily write that data to a stdio FILE handle. You must pass a FILE * value (such as stdout or stderr or the return value from fopen()) as the payload to the git_diff_print and/or git_patch_print function. If you pass NULL, this will write data to stdout.

\n", - "group": "diff" + "description": "

Create a new author and/or committer signatures with default\n information based on the configuration and environment variables.

\n", + "comments": "

If author_out is set, it will be populated with the author information. The GIT_AUTHOR_NAME and GIT_AUTHOR_EMAIL environment variables will be honored, and user.name and user.email configuration options will be honored if the environment variables are unset. For timestamps, GIT_AUTHOR_DATE will be used, otherwise the current time will be used.

\n\n

If committer_out is set, it will be populated with the committer information. The GIT_COMMITTER_NAME and GIT_COMMITTER_EMAIL environment variables will be honored, and user.name and user.email configuration options will be honored if the environment variables are unset. For timestamps, GIT_COMMITTER_DATE will be used, otherwise the current time will be used.

\n\n

If neither GIT_AUTHOR_DATE nor GIT_COMMITTER_DATE are set, both timestamps will be set to the same time.

\n\n

It will return GIT_ENOTFOUND if either the user.name or user.email are not set and there is no fallback from an environment variable. One of author_out or committer_out must be set.

\n", + "group": "signature", + "examples": { + "commit.c": ["ex/v1.9.1/commit.html#git_signature_default_from_env-10"], + "init.c": ["ex/v1.9.1/init.html#git_signature_default_from_env-10"], + "tag.c": ["ex/v1.9.1/tag.html#git_signature_default_from_env-13"] + } }, - "git_diff_get_perfdata": { + "git_signature_default": { "type": "function", - "file": "sys/diff.h", - "line": 83, - "lineto": 84, + "file": "git2/signature.h", + "line": 107, + "lineto": 107, "args": [ { "name": "out", - "type": "git_diff_perfdata *", - "comment": "Structure to be filled with diff performance data" + "type": "git_signature **", + "comment": "new signature" }, { - "name": "diff", - "type": "const git_diff *", - "comment": "Diff to read performance data from" + "name": "repo", + "type": "git_repository *", + "comment": "repository pointer" } ], - "argline": "git_diff_perfdata *out, const git_diff *diff", - "sig": "git_diff_perfdata *::const git_diff *", + "argline": "git_signature **out, git_repository *repo", + "sig": "git_signature **::git_repository *", "return": { "type": "int", - "comment": " 0 for success, \n<\n0 for error" + "comment": " 0 on success, GIT_ENOTFOUND if config is missing, or error code" }, - "description": "

Get performance data for a diff object.

\n", - "comments": "", - "group": "diff" + "description": "

Create a new action signature with default user and now timestamp.

\n", + "comments": "

This looks up the user.name and user.email from the configuration and uses the current time as the timestamp, and creates a new signature based on that information. It will return GIT_ENOTFOUND if either the user.name or user.email are not set.

\n\n

Note that these do not examine environment variables, only the configuration files. Use git_signature_default_from_env to consider the environment variables.

\n", + "group": "signature" }, - "git_status_list_get_perfdata": { + "git_signature_from_buffer": { "type": "function", - "file": "sys/diff.h", - "line": 89, - "lineto": 90, + "file": "git2/signature.h", + "line": 120, + "lineto": 120, "args": [ { "name": "out", - "type": "git_diff_perfdata *", - "comment": null + "type": "git_signature **", + "comment": "new signature" }, - { - "name": "status", - "type": "const git_status_list *", - "comment": null - } + { "name": "buf", "type": "const char *", "comment": "signature string" } ], - "argline": "git_diff_perfdata *out, const git_status_list *status", - "sig": "git_diff_perfdata *::const git_status_list *", + "argline": "git_signature **out, const char *buf", + "sig": "git_signature **::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, GIT_EINVALID if the signature is not parseable, or an error code" }, - "description": "

Get performance data for diffs from a git_status_list

\n", + "description": "

Create a new signature by parsing the given buffer, which is\n expected to be in the format "Real Name \n<email

\n\n
\n

timestamp tzoffset",\n where timestamp is the number of seconds since the Unix epoch and\n tzoffset is the timezone offset in hhmm format (note the lack\n of a colon separator).

\n
\n", "comments": "", - "group": "status" + "group": "signature" }, - "git_filter_lookup": { + "git_signature_dup": { "type": "function", - "file": "sys/filter.h", - "line": 27, - "lineto": 27, + "file": "git2/signature.h", + "line": 132, + "lineto": 132, "args": [ { - "name": "name", - "type": "const char *", - "comment": "The name of the filter" + "name": "dest", + "type": "git_signature **", + "comment": "pointer where to store the copy" + }, + { + "name": "sig", + "type": "const git_signature *", + "comment": "signature to duplicate" } ], - "argline": "const char *name", - "sig": "const char *", - "return": { - "type": "git_filter *", - "comment": " Pointer to the filter object or NULL if not found" - }, - "description": "

Look up a filter by name

\n", - "comments": "", - "group": "filter" + "argline": "git_signature **dest, const git_signature *sig", + "sig": "git_signature **::const git_signature *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a copy of an existing signature. All internal strings are also\n duplicated.

\n", + "comments": "

Call git_signature_free() to free the data.

\n", + "group": "signature" }, - "git_filter_list_new": { + "git_signature_free": { "type": "function", - "file": "sys/filter.h", - "line": 57, - "lineto": 61, + "file": "git2/signature.h", + "line": 143, + "lineto": 143, "args": [ { - "name": "out", - "type": "git_filter_list **", - "comment": null - }, - { - "name": "repo", - "type": "git_repository *", - "comment": null - }, - { - "name": "mode", - "type": "git_filter_mode_t", - "comment": null - }, - { - "name": "options", - "type": "uint32_t", - "comment": null + "name": "sig", + "type": "git_signature *", + "comment": "signature to free" } ], - "argline": "git_filter_list **out, git_repository *repo, git_filter_mode_t mode, uint32_t options", - "sig": "git_filter_list **::git_repository *::git_filter_mode_t::uint32_t", - "return": { - "type": "int", - "comment": null - }, - "description": "

Create a new empty filter list

\n", - "comments": "

Normally you won't use this because git_filter_list_load will create the filter list for you, but you can use this in combination with the git_filter_lookup and git_filter_list_push functions to assemble your own chains of filters.

\n", - "group": "filter" + "argline": "git_signature *sig", + "sig": "git_signature *", + "return": { "type": "void", "comment": null }, + "description": "

Free an existing signature.

\n", + "comments": "

Because the signature is not an opaque structure, it is legal to free it manually, but be sure to free the "name" and "email" strings in addition to the structure itself.

\n", + "group": "signature", + "examples": { + "commit.c": [ + "ex/v1.9.1/commit.html#git_signature_free-11", + "ex/v1.9.1/commit.html#git_signature_free-12" + ], + "general.c": [ + "ex/v1.9.1/general.html#git_signature_free-88", + "ex/v1.9.1/general.html#git_signature_free-89" + ], + "init.c": [ + "ex/v1.9.1/init.html#git_signature_free-11", + "ex/v1.9.1/init.html#git_signature_free-12" + ], + "tag.c": ["ex/v1.9.1/tag.html#git_signature_free-14"] + } }, - "git_filter_list_push": { + "git_stash_save": { "type": "function", - "file": "sys/filter.h", - "line": 76, + "file": "git2/stash.h", + "line": 72, "lineto": 77, "args": [ { - "name": "fl", - "type": "git_filter_list *", - "comment": null + "name": "out", + "type": "git_oid *", + "comment": "Object id of the commit containing the stashed state.\n This commit is also the target of the direct reference refs/stash." }, { - "name": "filter", - "type": "git_filter *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "The owning repository." }, { - "name": "payload", - "type": "void *", - "comment": null + "name": "stasher", + "type": "const git_signature *", + "comment": "The identity of the person performing the stashing." + }, + { + "name": "message", + "type": "const char *", + "comment": "Optional description along with the stashed state." + }, + { + "name": "flags", + "type": "uint32_t", + "comment": "Flags to control the stashing process. (see GIT_STASH_* above)" } ], - "argline": "git_filter_list *fl, git_filter *filter, void *payload", - "sig": "git_filter_list *::git_filter *::void *", + "argline": "git_oid *out, git_repository *repo, const git_signature *stasher, const char *message, uint32_t flags", + "sig": "git_oid *::git_repository *::const git_signature *::const char *::uint32_t", "return": { "type": "int", - "comment": null + "comment": " 0 on success, GIT_ENOTFOUND where there's nothing to stash,\n or error code." }, - "description": "

Add a filter to a filter list with the given payload.

\n", - "comments": "

Normally you won't have to do this because the filter list is created by calling the "check" function on registered filters when the filter attributes are set, but this does allow more direct manipulation of filter lists when desired.

\n\n

Note that normally the "check" function can set up a payload for the filter. Using this function, you can either pass in a payload if you know the expected payload format, or you can pass NULL. Some filters may fail with a NULL payload. Good luck!

\n", - "group": "filter" + "description": "

Save the local modifications to a new stash.

\n", + "comments": "", + "group": "stash" }, - "git_filter_list_length": { + "git_stash_save_options_init": { "type": "function", - "file": "sys/filter.h", - "line": 90, - "lineto": 90, + "file": "git2/stash.h", + "line": 118, + "lineto": 119, "args": [ { - "name": "fl", - "type": "const git_filter_list *", - "comment": "A filter list" + "name": "opts", + "type": "git_stash_save_options *", + "comment": "The `git_stash_save_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_STASH_SAVE_OPTIONS_VERSION`." } ], - "argline": "const git_filter_list *fl", - "sig": "const git_filter_list *", + "argline": "git_stash_save_options *opts, unsigned int version", + "sig": "git_stash_save_options *::unsigned int", "return": { - "type": "size_t", - "comment": " The number of filters in the list" + "type": "int", + "comment": " Zero on success; -1 on failure." }, - "description": "

Look up how many filters are in the list

\n", - "comments": "

We will attempt to apply all of these filters to any data passed in, but note that the filter apply action still has the option of skipping data that is passed in (for example, the CRLF filter will skip data that appears to be binary).

\n", - "group": "filter" + "description": "

Initialize git_stash_save_options structure

\n", + "comments": "

Initializes a git_stash_save_options with default values. Equivalent to creating an instance with GIT_STASH_SAVE_OPTIONS_INIT.

\n", + "group": "stash" }, - "git_filter_source_repo": { + "git_stash_save_with_opts": { "type": "function", - "file": "sys/filter.h", - "line": 100, - "lineto": 100, + "file": "git2/stash.h", + "line": 131, + "lineto": 134, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "out", + "type": "git_oid *", + "comment": "Object id of the commit containing the stashed state.\n This commit is also the target of the direct reference refs/stash." + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "The owning repository." + }, + { + "name": "opts", + "type": "const git_stash_save_options *", + "comment": "The stash options." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_oid *out, git_repository *repo, const git_stash_save_options *opts", + "sig": "git_oid *::git_repository *::const git_stash_save_options *", "return": { - "type": "git_repository *", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND where there's nothing to stash,\n or error code." }, - "description": "

Get the repository that the source data is coming from.

\n", + "description": "

Save the local modifications to a new stash, with options.

\n", "comments": "", - "group": "filter" + "group": "stash" }, - "git_filter_source_path": { + "git_stash_apply_options_init": { "type": "function", - "file": "sys/filter.h", - "line": 105, - "lineto": 105, + "file": "git2/stash.h", + "line": 225, + "lineto": 226, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "opts", + "type": "git_stash_apply_options *", + "comment": "The `git_stash_apply_options` struct to initialize." + }, + { + "name": "version", + "type": "unsigned int", + "comment": "The struct version; pass `GIT_STASH_APPLY_OPTIONS_VERSION`." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_stash_apply_options *opts, unsigned int version", + "sig": "git_stash_apply_options *::unsigned int", "return": { - "type": "const char *", - "comment": null + "type": "int", + "comment": " Zero on success; -1 on failure." }, - "description": "

Get the path that the source data is coming from.

\n", - "comments": "", - "group": "filter" + "description": "

Initialize git_stash_apply_options structure

\n", + "comments": "

Initializes a git_stash_apply_options with default values. Equivalent to creating an instance with GIT_STASH_APPLY_OPTIONS_INIT.

\n", + "group": "stash" }, - "git_filter_source_filemode": { + "git_stash_apply": { "type": "function", - "file": "sys/filter.h", - "line": 111, - "lineto": 111, + "file": "git2/stash.h", + "line": 252, + "lineto": 255, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "The owning repository." + }, + { + "name": "index", + "type": "size_t", + "comment": "The position within the stash list. 0 points to the\n most recent stashed state." + }, + { + "name": "options", + "type": "const git_stash_apply_options *", + "comment": "Optional options to control how stashes are applied." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_repository *repo, size_t index, const git_stash_apply_options *options", + "sig": "git_repository *::size_t::const git_stash_apply_options *", "return": { - "type": "uint16_t", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the\n given index, GIT_EMERGECONFLICT if changes exist in the working\n directory, or an error code" }, - "description": "

Get the file mode of the source file\n If the mode is unknown, this will return 0

\n", - "comments": "", - "group": "filter" + "description": "

Apply a single stashed state from the stash list.

\n", + "comments": "

If local changes in the working directory conflict with changes in the stash then GIT_EMERGECONFLICT will be returned. In this case, the index will always remain unmodified and all files in the working directory will remain unmodified. However, if you are restoring untracked files or ignored files and there is a conflict when applying the modified files, then those files will remain in the working directory.

\n\n

If passing the GIT_STASH_APPLY_REINSTATE_INDEX flag and there would be conflicts when reinstating the index, the function will return GIT_EMERGECONFLICT and both the working directory and index will be left unmodified.

\n", + "group": "stash" }, - "git_filter_source_id": { + "git_stash_foreach": { "type": "function", - "file": "sys/filter.h", - "line": 118, - "lineto": 118, + "file": "git2/stash.h", + "line": 288, + "lineto": 291, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "Repository where to find the stash." + }, + { + "name": "callback", + "type": "git_stash_cb", + "comment": "Callback to invoke per found stashed state. The most\n recent stash state will be enumerated first." + }, + { + "name": "payload", + "type": "void *", + "comment": "Extra parameter to callback function." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_repository *repo, git_stash_cb callback, void *payload", + "sig": "git_repository *::git_stash_cb::void *", "return": { - "type": "const git_oid *", - "comment": null + "type": "int", + "comment": " 0 on success, non-zero callback return value, or error code." }, - "description": "

Get the OID of the source\n If the OID is unknown (often the case with GIT_FILTER_CLEAN) then\n this will return NULL.

\n", - "comments": "", - "group": "filter" + "description": "

Loop over all the stashed states and issue a callback for each one.

\n", + "comments": "

If the callback returns a non-zero value, this will stop looping.

\n", + "group": "stash" }, - "git_filter_source_mode": { + "git_stash_drop": { "type": "function", - "file": "sys/filter.h", - "line": 123, - "lineto": 123, + "file": "git2/stash.h", + "line": 304, + "lineto": 306, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "The owning repository." + }, + { + "name": "index", + "type": "size_t", + "comment": "The position within the stash list. 0 points to the\n most recent stashed state." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_repository *repo, size_t index", + "sig": "git_repository *::size_t", "return": { - "type": "git_filter_mode_t", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the given\n index, or error code." }, - "description": "

Get the git_filter_mode_t to be used

\n", + "description": "

Remove a single stashed state from the stash list.

\n", "comments": "", - "group": "filter" + "group": "stash" }, - "git_filter_source_flags": { + "git_stash_pop": { "type": "function", - "file": "sys/filter.h", - "line": 128, - "lineto": 128, + "file": "git2/stash.h", + "line": 320, + "lineto": 323, "args": [ { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "repo", + "type": "git_repository *", + "comment": "The owning repository." + }, + { + "name": "index", + "type": "size_t", + "comment": "The position within the stash list. 0 points to the\n most recent stashed state." + }, + { + "name": "options", + "type": "const git_stash_apply_options *", + "comment": "Optional options to control how stashes are applied." } ], - "argline": "const git_filter_source *src", - "sig": "const git_filter_source *", + "argline": "git_repository *repo, size_t index, const git_stash_apply_options *options", + "sig": "git_repository *::size_t::const git_stash_apply_options *", "return": { - "type": "uint32_t", - "comment": null + "type": "int", + "comment": " 0 on success, GIT_ENOTFOUND if there's no stashed state for the given\n index, or error code. (see git_stash_apply() above for details)" }, - "description": "

Get the combination git_filter_flag_t options to be applied

\n", + "description": "

Apply a single stashed state from the stash list and remove it from the list\n if successful.

\n", "comments": "", - "group": "filter" + "group": "stash" }, - "git_filter_init": { + "git_status_options_init": { "type": "function", - "file": "sys/filter.h", - "line": 284, - "lineto": 284, + "file": "git2/status.h", + "line": 280, + "lineto": 282, "args": [ { - "name": "filter", - "type": "git_filter *", - "comment": "the `git_filter` struct to initialize." + "name": "opts", + "type": "git_status_options *", + "comment": "The `git_status_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version the struct; pass `GIT_FILTER_VERSION`" + "comment": "The struct version; pass `GIT_STATUS_OPTIONS_VERSION`." } ], - "argline": "git_filter *filter, unsigned int version", - "sig": "git_filter *::unsigned int", + "argline": "git_status_options *opts, unsigned int version", + "sig": "git_status_options *::unsigned int", "return": { "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_filter with default values. Equivalent to\n creating an instance with GIT_FILTER_INIT.

\n", - "comments": "", - "group": "filter" + "description": "

Initialize git_status_options structure

\n", + "comments": "

Initializes a git_status_options with default values. Equivalent to creating an instance with GIT_STATUS_OPTIONS_INIT.

\n", + "group": "status" }, - "git_filter_register": { + "git_status_foreach": { "type": "function", - "file": "sys/filter.h", - "line": 312, - "lineto": 313, + "file": "git2/status.h", + "line": 320, + "lineto": 323, "args": [ { - "name": "name", - "type": "const char *", - "comment": "A name by which the filter can be referenced. Attempting\n \t\t\tto register with an in-use name will return GIT_EEXISTS." + "name": "repo", + "type": "git_repository *", + "comment": "A repository object" }, { - "name": "filter", - "type": "git_filter *", - "comment": "The filter definition. This pointer will be stored as is\n \t\t\tby libgit2 so it must be a durable allocation (either static\n \t\t\tor on the heap)." + "name": "callback", + "type": "git_status_cb", + "comment": "The function to call on each file" }, { - "name": "priority", - "type": "int", - "comment": "The priority for filter application" + "name": "payload", + "type": "void *", + "comment": "Pointer to pass through to callback function" } ], - "argline": "const char *name, git_filter *filter, int priority", - "sig": "const char *::git_filter *::int", + "argline": "git_repository *repo, git_status_cb callback, void *payload", + "sig": "git_repository *::git_status_cb::void *", "return": { "type": "int", - "comment": " 0 on successful registry, error code \n<\n0 on failure" + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Register a filter under a given name with a given priority.

\n", - "comments": "

As mentioned elsewhere, the initialize callback will not be invoked immediately. It is deferred until the filter is used in some way.

\n\n

A filter's attribute checks and check and apply callbacks will be issued in order of priority on smudge (to workdir), and in reverse order of priority on clean (to odb).

\n\n

Two filters are preregistered with libgit2: - GIT_FILTER_CRLF with priority 0 - GIT_FILTER_IDENT with priority 100

\n\n

Currently the filter registry is not thread safe, so any registering or deregistering of filters must be done outside of any possible usage of the filters (i.e. during application setup or shutdown).

\n", - "group": "filter" + "description": "

Gather file statuses and run a callback for each one.

\n", + "comments": "

The callback is passed the path of the file, the status (a combination of the git_status_t values above) and the payload data pointer passed into this function.

\n\n

If the callback returns a non-zero value, this function will stop looping and return that value to caller.

\n", + "group": "status", + "examples": { "status.c": ["ex/v1.9.1/status.html#git_status_foreach-6"] } }, - "git_filter_unregister": { + "git_status_foreach_ext": { "type": "function", - "file": "sys/filter.h", - "line": 328, - "lineto": 328, + "file": "git2/status.h", + "line": 344, + "lineto": 348, "args": [ { - "name": "name", - "type": "const char *", - "comment": "The name under which the filter was registered" + "name": "repo", + "type": "git_repository *", + "comment": "Repository object" + }, + { + "name": "opts", + "type": "const git_status_options *", + "comment": "Status options structure" + }, + { + "name": "callback", + "type": "git_status_cb", + "comment": "The function to call on each file" + }, + { + "name": "payload", + "type": "void *", + "comment": "Pointer to pass through to callback function" } ], - "argline": "const char *name", - "sig": "const char *", + "argline": "git_repository *repo, const git_status_options *opts, git_status_cb callback, void *payload", + "sig": "git_repository *::const git_status_options *::git_status_cb::void *", "return": { "type": "int", - "comment": " 0 on success, error code \n<\n0 on failure" + "comment": " 0 on success, non-zero callback return value, or error code" }, - "description": "

Remove the filter with the given name

\n", - "comments": "

Attempting to remove the builtin libgit2 filters is not permitted and will return an error.

\n\n

Currently the filter registry is not thread safe, so any registering or deregistering of filters must be done outside of any possible usage of the filters (i.e. during application setup or shutdown).

\n", - "group": "filter" + "description": "

Gather file status information and run callbacks as requested.

\n", + "comments": "

This is an extended version of the git_status_foreach() API that allows for more granular control over which paths will be processed and in what order. See the git_status_options structure for details about the additional controls that this makes available.

\n\n

Note that if a pathspec is given in the git_status_options to filter the status, then the results from rename detection (if you enable it) may not be accurate. To do rename detection properly, this must be called with no pathspec so that all files can be considered.

\n", + "group": "status", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_status_foreach_ext-7"] + } }, - "git_hashsig_create": { + "git_status_file": { "type": "function", - "file": "sys/hashsig.h", - "line": 62, - "lineto": 66, + "file": "git2/status.h", + "line": 376, + "lineto": 379, "args": [ { - "name": "out", - "type": "git_hashsig **", - "comment": "The computed similarity signature." - }, - { - "name": "buf", - "type": "const char *", - "comment": "The input buffer." + "name": "status_flags", + "type": "unsigned int *", + "comment": "Output combination of git_status_t values for file" }, { - "name": "buflen", - "type": "size_t", - "comment": "The input buffer size." + "name": "repo", + "type": "git_repository *", + "comment": "A repository object" }, { - "name": "opts", - "type": "git_hashsig_option_t", - "comment": "The signature computation options (see above)." + "name": "path", + "type": "const char *", + "comment": "The exact path to retrieve status for relative to the\n repository working directory" } ], - "argline": "git_hashsig **out, const char *buf, size_t buflen, git_hashsig_option_t opts", - "sig": "git_hashsig **::const char *::size_t::git_hashsig_option_t", + "argline": "unsigned int *status_flags, git_repository *repo, const char *path", + "sig": "unsigned int *::git_repository *::const char *", "return": { "type": "int", - "comment": " 0 on success, GIT_EBUFS if the buffer doesn't contain enough data to\n compute a valid signature (unless GIT_HASHSIG_ALLOW_SMALL_FILES is set), or\n error code." + "comment": " 0 on success, GIT_ENOTFOUND if the file is not found in the HEAD,\n index, and work tree, GIT_EAMBIGUOUS if `path` matches multiple files\n or if it refers to a folder, and -1 on other errors." }, - "description": "

Compute a similarity signature for a text buffer

\n", - "comments": "

If you have passed the option GIT_HASHSIG_IGNORE_WHITESPACE, then the whitespace will be removed from the buffer while it is being processed, modifying the buffer in place. Sorry about that!

\n", - "group": "hashsig" + "description": "

Get file status for a single file.

\n", + "comments": "

This tries to get status for the filename that you give. If no files match that name (in either the HEAD, index, or working directory), this returns GIT_ENOTFOUND.

\n\n

If the name matches multiple files (for example, if the path names a directory or if running on a case- insensitive filesystem and yet the HEAD has two entries that both match the path), then this returns GIT_EAMBIGUOUS because it cannot give correct results.

\n\n

This does not do any sort of rename detection. Renames require a set of targets and because of the path filtering, there is not enough information to check renames correctly. To check file status with rename detection, there is no choice but to do a full git_status_list_new and scan through looking for the path that you are interested in.

\n", + "group": "status", + "examples": { "add.c": ["ex/v1.9.1/add.html#git_status_file-6"] } }, - "git_hashsig_create_fromfile": { + "git_status_list_new": { "type": "function", - "file": "sys/hashsig.h", - "line": 81, - "lineto": 84, + "file": "git2/status.h", + "line": 394, + "lineto": 397, "args": [ { "name": "out", - "type": "git_hashsig **", - "comment": "The computed similarity signature." + "type": "git_status_list **", + "comment": "Pointer to store the status results in" }, { - "name": "path", - "type": "const char *", - "comment": "The path to the input file." + "name": "repo", + "type": "git_repository *", + "comment": "Repository object" }, { "name": "opts", - "type": "git_hashsig_option_t", - "comment": "The signature computation options (see above)." + "type": "const git_status_options *", + "comment": "Status options structure" } ], - "argline": "git_hashsig **out, const char *path, git_hashsig_option_t opts", - "sig": "git_hashsig **::const char *::git_hashsig_option_t", - "return": { - "type": "int", - "comment": " 0 on success, GIT_EBUFS if the buffer doesn't contain enough data to\n compute a valid signature (unless GIT_HASHSIG_ALLOW_SMALL_FILES is set), or\n error code." - }, - "description": "

Compute a similarity signature for a text file

\n", - "comments": "

This walks through the file, only loading a maximum of 4K of file data at a time. Otherwise, it acts just like git_hashsig_create.

\n", - "group": "hashsig" + "argline": "git_status_list **out, git_repository *repo, const git_status_options *opts", + "sig": "git_status_list **::git_repository *::const git_status_options *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Gather file status information and populate the git_status_list.

\n", + "comments": "

Note that if a pathspec is given in the git_status_options to filter the status, then the results from rename detection (if you enable it) may not be accurate. To do rename detection properly, this must be called with no pathspec so that all files can be considered.

\n", + "group": "status", + "examples": { + "status.c": [ + "ex/v1.9.1/status.html#git_status_list_new-8", + "ex/v1.9.1/status.html#git_status_list_new-9" + ] + } }, - "git_hashsig_free": { + "git_status_list_entrycount": { "type": "function", - "file": "sys/hashsig.h", - "line": 91, - "lineto": 91, + "file": "git2/status.h", + "line": 408, + "lineto": 409, "args": [ { - "name": "sig", - "type": "git_hashsig *", - "comment": "The similarity signature to free." + "name": "statuslist", + "type": "git_status_list *", + "comment": "Existing status list object" } ], - "argline": "git_hashsig *sig", - "sig": "git_hashsig *", + "argline": "git_status_list *statuslist", + "sig": "git_status_list *", "return": { - "type": "void", - "comment": null + "type": "size_t", + "comment": " the number of status entries" }, - "description": "

Release memory for a content similarity signature

\n", - "comments": "", - "group": "hashsig" + "description": "

Gets the count of status entries in this list.

\n", + "comments": "

If there are no changes in status (at least according the options given when the status list was created), this can return 0.

\n", + "group": "status", + "examples": { + "status.c": [ + "ex/v1.9.1/status.html#git_status_list_entrycount-10", + "ex/v1.9.1/status.html#git_status_list_entrycount-11" + ] + } }, - "git_hashsig_compare": { + "git_status_byindex": { "type": "function", - "file": "sys/hashsig.h", - "line": 100, - "lineto": 102, + "file": "git2/status.h", + "line": 420, + "lineto": 422, "args": [ { - "name": "a", - "type": "const git_hashsig *", - "comment": "The first similarity signature to compare." + "name": "statuslist", + "type": "git_status_list *", + "comment": "Existing status list object" }, - { - "name": "b", - "type": "const git_hashsig *", - "comment": "The second similarity signature to compare." - } + { "name": "idx", "type": "size_t", "comment": "Position of the entry" } ], - "argline": "const git_hashsig *a, const git_hashsig *b", - "sig": "const git_hashsig *::const git_hashsig *", + "argline": "git_status_list *statuslist, size_t idx", + "sig": "git_status_list *::size_t", "return": { - "type": "int", - "comment": " [0 to 100] on success as the similarity score, or error code." + "type": "const git_status_entry *", + "comment": " Pointer to the entry; NULL if out of bounds" }, - "description": "

Measure similarity score between two similarity signatures

\n", - "comments": "", - "group": "hashsig" + "description": "

Get a pointer to one of the entries in the status list.

\n", + "comments": "

The entry is not modifiable and should not be freed.

\n", + "group": "status", + "examples": { + "status.c": [ + "ex/v1.9.1/status.html#git_status_byindex-12", + "ex/v1.9.1/status.html#git_status_byindex-13", + "ex/v1.9.1/status.html#git_status_byindex-14", + "ex/v1.9.1/status.html#git_status_byindex-15", + "ex/v1.9.1/status.html#git_status_byindex-16", + "ex/v1.9.1/status.html#git_status_byindex-17" + ] + } }, - "git_mempack_new": { + "git_status_list_free": { "type": "function", - "file": "sys/mempack.h", - "line": 45, - "lineto": 45, + "file": "git2/status.h", + "line": 429, + "lineto": 430, "args": [ { - "name": "out", - "type": "git_odb_backend **", - "comment": "Pointer where to store the ODB backend" + "name": "statuslist", + "type": "git_status_list *", + "comment": "Existing status list object" } ], - "argline": "git_odb_backend **out", - "sig": "git_odb_backend **", - "return": { - "type": "int", - "comment": " 0 on success; error code otherwise" - }, - "description": "
Instantiate a new mempack backend.\n
\n", - "comments": "
The backend must be added to an existing ODB with the highest   priority.\n\n    git_mempack_new(&mempacker);        git_repository_odb(&odb, repository);       git_odb_add_backend(odb, mempacker, 999);\n\nOnce the backend has been loaded, all writes to the ODB will    instead be queued in memory, and can be finalized with  `git_mempack_dump`.\n\nSubsequent reads will also be served from the in-memory store   to ensure consistency, until the memory store is dumped.\n
\n", - "group": "mempack" + "argline": "git_status_list *statuslist", + "sig": "git_status_list *", + "return": { "type": "void", "comment": null }, + "description": "

Free an existing status list

\n", + "comments": "", + "group": "status", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_status_list_free-18"] + } }, - "git_mempack_dump": { + "git_status_should_ignore": { "type": "function", - "file": "sys/mempack.h", - "line": 68, - "lineto": 68, + "file": "git2/status.h", + "line": 448, + "lineto": 451, "args": [ { - "name": "pack", - "type": "git_buf *", - "comment": "Buffer where to store the raw packfile" + "name": "ignored", + "type": "int *", + "comment": "Boolean returning 0 if the file is not ignored, 1 if it is" }, { "name": "repo", "type": "git_repository *", - "comment": "The active repository where the backend is loaded" + "comment": "A repository object" }, { - "name": "backend", - "type": "git_odb_backend *", - "comment": "The mempack backend" + "name": "path", + "type": "const char *", + "comment": "The file to check ignores for, rooted at the repo's workdir." } ], - "argline": "git_buf *pack, git_repository *repo, git_odb_backend *backend", - "sig": "git_buf *::git_repository *::git_odb_backend *", + "argline": "int *ignored, git_repository *repo, const char *path", + "sig": "int *::git_repository *::const char *", "return": { "type": "int", - "comment": " 0 on success; error code otherwise" + "comment": " 0 if ignore rules could be processed for the file (regardless\n of whether it exists or not), or an error \n<\n 0 if they could not." }, - "description": "
Dump all the queued in-memory writes to a packfile.\n
\n", - "comments": "
The contents of the packfile will be stored in the given buffer.    It is the caller's responsibility to ensure that the generated  packfile is available to the repository (e.g. by writing it to disk, or doing something crazy like distributing it across   several copies of the repository over a network).\n\nOnce the generated packfile is available to the repository, call `git_mempack_reset` to cleanup the memory store.\n\nCalling `git_mempack_reset` before the packfile has been    written to disk will result in an inconsistent repository   (the objects in the memory store won't be accessible).\n
\n", - "group": "mempack" + "description": "

Test if the ignore rules apply to a given file.

\n", + "comments": "

This function checks the ignore rules to see if they would apply to the given file. This indicates if the file would be ignored regardless of whether the file is already in the index or committed to the repository.

\n\n

One way to think of this is if you were to do "git add ." on the directory containing the file, would it be added or not?

\n", + "group": "status" }, - "git_mempack_reset": { + "git_strarray_dispose": { "type": "function", - "file": "sys/mempack.h", - "line": 82, - "lineto": 82, + "file": "git2/strarray.h", + "line": 37, + "lineto": 37, "args": [ { - "name": "backend", - "type": "git_odb_backend *", - "comment": "The mempack backend" + "name": "array", + "type": "git_strarray *", + "comment": "The git_strarray that contains strings to free" } ], - "argline": "git_odb_backend *backend", - "sig": "git_odb_backend *", - "return": { - "type": "void", - "comment": null - }, - "description": "
Reset the memory packer by clearing all the queued objects.\n
\n", - "comments": "
This assumes that `git_mempack_dump` has been called before to  store all the queued objects into a single packfile.\n\nAlternatively, call `reset` without a previous dump to "undo"   all the recently written objects, giving transaction-like   semantics to the Git repository.\n
\n", - "group": "mempack" + "argline": "git_strarray *array", + "sig": "git_strarray *", + "return": { "type": "void", "comment": null }, + "description": "

Free the strings contained in a string array. This method should\n be called on git_strarray objects that were provided by the\n library. Not doing so will result in a memory leak.

\n", + "comments": "

This does not free the git_strarray itself, since the library will never allocate that object directly itself.

\n", + "group": "strarray", + "examples": { + "checkout.c": ["ex/v1.9.1/checkout.html#git_strarray_dispose-26"], + "general.c": ["ex/v1.9.1/general.html#git_strarray_dispose-90"], + "remote.c": [ + "ex/v1.9.1/remote.html#git_strarray_dispose-11", + "ex/v1.9.1/remote.html#git_strarray_dispose-12" + ], + "tag.c": ["ex/v1.9.1/tag.html#git_strarray_dispose-15"] + } }, - "git_odb_init_backend": { + "git_submodule_update_options_init": { "type": "function", - "file": "sys/odb_backend.h", - "line": 116, - "lineto": 118, + "file": "git2/submodule.h", + "line": 180, + "lineto": 181, "args": [ { - "name": "backend", - "type": "git_odb_backend *", - "comment": "the `git_odb_backend` struct to initialize." + "name": "opts", + "type": "git_submodule_update_options *", + "comment": "The `git_submodule_update_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Version the struct; pass `GIT_ODB_BACKEND_VERSION`" + "comment": "The struct version; pass `GIT_SUBMODULE_UPDATE_OPTIONS_VERSION`." } ], - "argline": "git_odb_backend *backend, unsigned int version", - "sig": "git_odb_backend *::unsigned int", + "argline": "git_submodule_update_options *opts, unsigned int version", + "sig": "git_submodule_update_options *::unsigned int", "return": { "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_odb_backend with default values. Equivalent to\n creating an instance with GIT_ODB_BACKEND_INIT.

\n", - "comments": "", - "group": "odb" - }, - "git_openssl_set_locking": { - "type": "function", - "file": "sys/openssl.h", - "line": 34, - "lineto": 34, - "args": [], - "argline": "", - "sig": "", - "return": { - "type": "int", - "comment": " 0 on success, -1 if there are errors or if libgit2 was not\n built with OpenSSL and threading support." - }, - "description": "

Initialize the OpenSSL locks

\n", - "comments": "

OpenSSL requires the application to determine how it performs locking.

\n\n

This is a last-resort convenience function which libgit2 provides for allocating and initializing the locks as well as setting the locking function to use the system's native locking functions.

\n\n

The locking function will be cleared and the memory will be freed when you call git_threads_shutdown().

\n\n

If your programming language has an OpenSSL package/bindings, it likely sets up locking. You should very strongly prefer that over this function.

\n", - "group": "openssl" + "description": "

Initialize git_submodule_update_options structure

\n", + "comments": "

Initializes a git_submodule_update_options with default values. Equivalent to creating an instance with GIT_SUBMODULE_UPDATE_OPTIONS_INIT.

\n", + "group": "submodule" }, - "git_refdb_init_backend": { + "git_submodule_update": { "type": "function", - "file": "sys/refdb_backend.h", - "line": 183, - "lineto": 185, + "file": "git2/submodule.h", + "line": 201, + "lineto": 201, "args": [ { - "name": "backend", - "type": "git_refdb_backend *", - "comment": "the `git_refdb_backend` struct to initialize" + "name": "submodule", + "type": "git_submodule *", + "comment": "Submodule object" }, { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_REFDB_BACKEND_VERSION`" + "name": "init", + "type": "int", + "comment": "If the submodule is not initialized, setting this flag to true\n will initialize the submodule before updating. Otherwise, this will\n return an error if attempting to update an uninitialized repository.\n but setting this to true forces them to be updated." + }, + { + "name": "options", + "type": "git_submodule_update_options *", + "comment": "configuration options for the update. If NULL, the\n function works as though GIT_SUBMODULE_UPDATE_OPTIONS_INIT was passed." } ], - "argline": "git_refdb_backend *backend, unsigned int version", - "sig": "git_refdb_backend *::unsigned int", + "argline": "git_submodule *submodule, int init, git_submodule_update_options *options", + "sig": "git_submodule *::int::git_submodule_update_options *", "return": { "type": "int", - "comment": " Zero on success; -1 on failure." + "comment": " 0 on success, any non-zero return value from a callback\n function, or a negative value to indicate an error (use\n `git_error_last` for a detailed error message)." }, - "description": "

Initializes a git_refdb_backend with default values. Equivalent to\n creating an instance with GIT_REFDB_BACKEND_INIT.

\n", + "description": "

Update a submodule. This will clone a missing submodule and\n checkout the subrepository to the commit specified in the index of\n the containing repository. If the submodule repository doesn't contain\n the target commit (e.g. because fetchRecurseSubmodules isn't set), then\n the submodule is fetched using the fetch options supplied in options.

\n", "comments": "", - "group": "refdb" + "group": "submodule" }, - "git_refdb_backend_fs": { + "git_submodule_lookup": { "type": "function", - "file": "sys/refdb_backend.h", - "line": 198, - "lineto": 200, + "file": "git2/submodule.h", + "line": 230, + "lineto": 233, "args": [ { - "name": "backend_out", - "type": "git_refdb_backend **", - "comment": "Output pointer to the git_refdb_backend object" + "name": "out", + "type": "git_submodule **", + "comment": "Output ptr to submodule; pass NULL to just get return code" }, { "name": "repo", "type": "git_repository *", - "comment": "Git repository to access" + "comment": "The parent repository" + }, + { + "name": "name", + "type": "const char *", + "comment": "The name of or path to the submodule; trailing slashes okay" } ], - "argline": "git_refdb_backend **backend_out, git_repository *repo", - "sig": "git_refdb_backend **::git_repository *", + "argline": "git_submodule **out, git_repository *repo, const char *name", + "sig": "git_submodule **::git_repository *::const char *", "return": { "type": "int", - "comment": " 0 on success, \n<\n0 error code on failure" + "comment": " 0 on success, GIT_ENOTFOUND if submodule does not exist,\n GIT_EEXISTS if a repository is found in working directory only,\n -1 on other errors." }, - "description": "

Constructors for default filesystem-based refdb backend

\n", - "comments": "

Under normal usage, this is called for you when the repository is opened / created, but you can use this to explicitly construct a filesystem refdb backend for a repository.

\n", - "group": "refdb" + "description": "

Lookup submodule information by name or path.

\n", + "comments": "

Given either the submodule name or path (they are usually the same), this returns a structure describing the submodule.

\n\n

There are two expected error scenarios:

\n\n\n\n

You must call git_submodule_free when done with the submodule.

\n", + "group": "submodule" }, - "git_refdb_set_backend": { + "git_submodule_dup": { "type": "function", - "file": "sys/refdb_backend.h", - "line": 212, - "lineto": 214, + "file": "git2/submodule.h", + "line": 243, + "lineto": 243, "args": [ { - "name": "refdb", - "type": "git_refdb *", - "comment": "database to add the backend to" + "name": "out", + "type": "git_submodule **", + "comment": "Pointer to store the copy of the submodule." + }, + { + "name": "source", + "type": "git_submodule *", + "comment": "Original submodule to copy." + } + ], + "argline": "git_submodule **out, git_submodule *source", + "sig": "git_submodule **::git_submodule *", + "return": { "type": "int", "comment": " 0" }, + "description": "

Create an in-memory copy of a submodule. The copy must be explicitly\n free'd or it will leak.

\n", + "comments": "", + "group": "submodule" + }, + "git_submodule_free": { + "type": "function", + "file": "git2/submodule.h", + "line": 250, + "lineto": 250, + "args": [ + { + "name": "submodule", + "type": "git_submodule *", + "comment": "Submodule object" + } + ], + "argline": "git_submodule *submodule", + "sig": "git_submodule *", + "return": { "type": "void", "comment": null }, + "description": "

Release a submodule

\n", + "comments": "", + "group": "submodule" + }, + "git_submodule_foreach": { + "type": "function", + "file": "git2/submodule.h", + "line": 270, + "lineto": 273, + "args": [ + { + "name": "repo", + "type": "git_repository *", + "comment": "The repository" + }, + { + "name": "callback", + "type": "git_submodule_cb", + "comment": "Function to be called with the name of each submodule.\n Return a non-zero value to terminate the iteration." }, { - "name": "backend", - "type": "git_refdb_backend *", - "comment": "pointer to a git_refdb_backend instance" + "name": "payload", + "type": "void *", + "comment": "Extra data to pass to callback" } ], - "argline": "git_refdb *refdb, git_refdb_backend *backend", - "sig": "git_refdb *::git_refdb_backend *", + "argline": "git_repository *repo, git_submodule_cb callback, void *payload", + "sig": "git_repository *::git_submodule_cb::void *", "return": { "type": "int", - "comment": " 0 on success; error code otherwise" + "comment": " 0 on success, -1 on error, or non-zero return value of callback" }, - "description": "

Sets the custom backend to an existing reference DB

\n", - "comments": "

The git_refdb will take ownership of the git_refdb_backend so you should NOT free it after calling this function.

\n", - "group": "refdb" + "description": "

Iterate over all tracked submodules of a repository.

\n", + "comments": "

See the note on git_submodule above. This iterates over the tracked submodules as described therein.

\n\n

If you are concerned about items in the working directory that look like submodules but are not tracked, the diff API will generate a diff record for workdir items that look like submodules but are not tracked, showing them as added in the workdir. Also, the status API will treat the entire subdirectory of a contained git repo as a single GIT_STATUS_WT_NEW item.

\n", + "group": "submodule", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_submodule_foreach-19"] + } }, - "git_reference__alloc": { + "git_submodule_add_setup": { "type": "function", - "file": "sys/refs.h", - "line": 31, - "lineto": 34, + "file": "git2/submodule.h", + "line": 301, + "lineto": 306, "args": [ { - "name": "name", - "type": "const char *", - "comment": "the reference name" + "name": "out", + "type": "git_submodule **", + "comment": "The newly created submodule ready to open for clone" }, { - "name": "oid", - "type": "const git_oid *", - "comment": "the object id for a direct reference" + "name": "repo", + "type": "git_repository *", + "comment": "The repository in which you want to create the submodule" }, { - "name": "peel", - "type": "const git_oid *", - "comment": "the first non-tag object's OID, or NULL" - } - ], - "argline": "const char *name, const git_oid *oid, const git_oid *peel", - "sig": "const char *::const git_oid *::const git_oid *", - "return": { - "type": "git_reference *", - "comment": " the created git_reference or NULL on error" - }, - "description": "

Create a new direct reference from an OID.

\n", - "comments": "", - "group": "reference" - }, - "git_reference__alloc_symbolic": { - "type": "function", - "file": "sys/refs.h", - "line": 43, - "lineto": 45, - "args": [ - { - "name": "name", + "name": "url", "type": "const char *", - "comment": "the reference name" + "comment": "URL for the submodule's remote" }, { - "name": "target", + "name": "path", "type": "const char *", - "comment": "the target for a symbolic reference" + "comment": "Path at which the submodule should be created" + }, + { + "name": "use_gitlink", + "type": "int", + "comment": "Should workdir contain a gitlink to the repo in\n .git/modules vs. repo directly in workdir." } ], - "argline": "const char *name, const char *target", - "sig": "const char *::const char *", + "argline": "git_submodule **out, git_repository *repo, const char *url, const char *path, int use_gitlink", + "sig": "git_submodule **::git_repository *::const char *::const char *::int", "return": { - "type": "git_reference *", - "comment": " the created git_reference or NULL on error" + "type": "int", + "comment": " 0 on success, GIT_EEXISTS if submodule already exists,\n -1 on other errors." }, - "description": "

Create a new symbolic reference.

\n", - "comments": "", - "group": "reference" + "description": "

Set up a new git submodule for checkout.

\n", + "comments": "

This does "git submodule add" up to the fetch and checkout of the submodule contents. It preps a new submodule, creates an entry in .gitmodules and creates an empty initialized repository either at the given path in the working directory or in .git/modules with a gitlink from the working directory to the new repo.

\n\n

To fully emulate "git submodule add" call this function, then open the submodule repo and perform the clone step as needed (if you don't need anything custom see git_submodule_clone()). Lastly, call git_submodule_add_finalize() to wrap up adding the new submodule and .gitmodules to the index to be ready to commit.

\n\n

You must call git_submodule_free on the submodule object when done.

\n", + "group": "submodule" }, - "git_repository_new": { + "git_submodule_clone": { "type": "function", - "file": "sys/repository.h", - "line": 31, - "lineto": 31, + "file": "git2/submodule.h", + "line": 319, + "lineto": 322, "args": [ { "name": "out", "type": "git_repository **", - "comment": "The blank repository" + "comment": "The newly created repository object. Optional." + }, + { + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule currently waiting for its clone." + }, + { + "name": "opts", + "type": "const git_submodule_update_options *", + "comment": "The options to use." } ], - "argline": "git_repository **out", - "sig": "git_repository **", + "argline": "git_repository **out, git_submodule *submodule, const git_submodule_update_options *opts", + "sig": "git_repository **::git_submodule *::const git_submodule_update_options *", "return": { "type": "int", - "comment": " 0 on success, or an error code" + "comment": " 0 on success, -1 on other errors (see git_clone)." }, - "description": "

Create a new repository with neither backends nor config object

\n", - "comments": "

Note that this is only useful if you wish to associate the repository with a non-filesystem-backed object database and config store.

\n", - "group": "repository" + "description": "

Perform the clone step for a newly created submodule.

\n", + "comments": "

This performs the necessary git_clone to setup a newly-created submodule.

\n", + "group": "submodule" }, - "git_repository__cleanup": { + "git_submodule_add_finalize": { "type": "function", - "file": "sys/repository.h", - "line": 44, - "lineto": 44, + "file": "git2/submodule.h", + "line": 335, + "lineto": 335, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": null + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to finish adding." } ], - "argline": "git_repository *repo", - "sig": "git_repository *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Reset all the internal state in a repository.

\n", - "comments": "

This will free all the mapped memory and internal objects of the repository and leave it in a "blank" state.

\n\n

There's no need to call this function directly unless you're trying to aggressively cleanup the repo before its deallocation. git_repository_free already performs this operation before deallocation the repo.

\n", - "group": "repository" + "argline": "git_submodule *submodule", + "sig": "git_submodule *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Resolve the setup of a new git submodule.

\n", + "comments": "

This should be called on a submodule once you have called add setup and done the clone of the submodule. This adds the .gitmodules file and the newly cloned submodule to the index to be ready to be committed (but doesn't actually do the commit).

\n", + "group": "submodule" }, - "git_repository_reinit_filesystem": { + "git_submodule_add_to_index": { "type": "function", - "file": "sys/repository.h", - "line": 61, - "lineto": 63, + "file": "git2/submodule.h", + "line": 347, + "lineto": 349, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to add to the index" }, { - "name": "recurse_submodules", + "name": "write_index", "type": "int", - "comment": "Should submodules be updated recursively" + "comment": "Boolean if this should immediately write the index\n file. If you pass this as false, you will have to get the\n git_index and explicitly call `git_index_write()` on it to\n save the change." } ], - "argline": "git_repository *repo, int recurse_submodules", - "sig": "git_repository *::int", + "argline": "git_submodule *submodule, int write_index", + "sig": "git_submodule *::int", "return": { "type": "int", - "comment": " 0 on success, \n<\n 0 on error" + "comment": " 0 on success, \n<\n0 on failure" }, - "description": "

Update the filesystem config settings for an open repository

\n", - "comments": "

When a repository is initialized, config values are set based on the properties of the filesystem that the repository is on, such as "core.ignorecase", "core.filemode", "core.symlinks", etc. If the repository is moved to a new filesystem, these properties may no longer be correct and API calls may not behave as expected. This call reruns the phase of repository initialization that sets those properties to compensate for the current filesystem of the repo.

\n", - "group": "repository" + "description": "

Add current submodule HEAD commit to index of superproject.

\n", + "comments": "", + "group": "submodule" }, - "git_repository_set_config": { + "git_submodule_owner": { "type": "function", - "file": "sys/repository.h", - "line": 78, - "lineto": 78, + "file": "git2/submodule.h", + "line": 362, + "lineto": 362, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "config", - "type": "git_config *", - "comment": "A Config object" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_repository *repo, git_config *config", - "sig": "git_repository *::git_config *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "void", - "comment": null + "type": "git_repository *", + "comment": " Pointer to `git_repository`" }, - "description": "

Set the configuration file for this repository

\n", - "comments": "

This configuration file will be used for all configuration queries involving this repository.

\n\n

The repository will keep a reference to the config file; the user must still free the config after setting it to the repository, or it will leak.

\n", - "group": "repository" + "description": "

Get the containing repository for a submodule.

\n", + "comments": "

This returns a pointer to the repository that contains the submodule. This is a just a reference to the repository that was passed to the original git_submodule_lookup() call, so if that repository has been freed, then this may be a dangling reference.

\n", + "group": "submodule" }, - "git_repository_set_odb": { + "git_submodule_name": { "type": "function", - "file": "sys/repository.h", - "line": 93, - "lineto": 93, + "file": "git2/submodule.h", + "line": 370, + "lineto": 370, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "odb", - "type": "git_odb *", - "comment": "An ODB object" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_repository *repo, git_odb *odb", - "sig": "git_repository *::git_odb *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "void", - "comment": null + "type": "const char *", + "comment": " Pointer to the submodule name" }, - "description": "

Set the Object Database for this repository

\n", - "comments": "

The ODB will be used for all object-related operations involving this repository.

\n\n

The repository will keep a reference to the ODB; the user must still free the ODB object after setting it to the repository, or it will leak.

\n", - "group": "repository" + "description": "

Get the name of submodule.

\n", + "comments": "", + "group": "submodule", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_submodule_name-20"] + } }, - "git_repository_set_refdb": { + "git_submodule_path": { "type": "function", - "file": "sys/repository.h", - "line": 108, - "lineto": 108, + "file": "git2/submodule.h", + "line": 381, + "lineto": 381, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "refdb", - "type": "git_refdb *", - "comment": "An refdb object" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_repository *repo, git_refdb *refdb", - "sig": "git_repository *::git_refdb *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "void", - "comment": null + "type": "const char *", + "comment": " Pointer to the submodule path" }, - "description": "

Set the Reference Database Backend for this repository

\n", - "comments": "

The refdb will be used for all reference related operations involving this repository.

\n\n

The repository will keep a reference to the refdb; the user must still free the refdb object after setting it to the repository, or it will leak.

\n", - "group": "repository" + "description": "

Get the path to the submodule.

\n", + "comments": "

The path is almost always the same as the submodule name, but the two are actually not required to match.

\n", + "group": "submodule", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_submodule_path-21"] + } }, - "git_repository_set_index": { + "git_submodule_url": { "type": "function", - "file": "sys/repository.h", - "line": 123, - "lineto": 123, + "file": "git2/submodule.h", + "line": 389, + "lineto": 389, "args": [ { - "name": "repo", - "type": "git_repository *", - "comment": "A repository object" - }, - { - "name": "index", - "type": "git_index *", - "comment": "An index object" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_repository *repo, git_index *index", - "sig": "git_repository *::git_index *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "void", - "comment": null + "type": "const char *", + "comment": " Pointer to the submodule url" }, - "description": "

Set the index file for this repository

\n", - "comments": "

This index will be used for all index-related operations involving this repository.

\n\n

The repository will keep a reference to the index file; the user must still free the index after setting it to the repository, or it will leak.

\n", - "group": "repository" + "description": "

Get the URL for the submodule.

\n", + "comments": "", + "group": "submodule" }, - "git_repository_set_bare": { + "git_submodule_resolve_url": { "type": "function", - "file": "sys/repository.h", - "line": 136, - "lineto": 136, + "file": "git2/submodule.h", + "line": 399, + "lineto": 399, "args": [ + { + "name": "out", + "type": "git_buf *", + "comment": "buffer to store the absolute submodule url in" + }, { "name": "repo", "type": "git_repository *", - "comment": "Repo to make bare" + "comment": "Pointer to repository object" + }, + { "name": "url", "type": "const char *", "comment": "Relative url" } + ], + "argline": "git_buf *out, git_repository *repo, const char *url", + "sig": "git_buf *::git_repository *::const char *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Resolve a submodule url relative to the given repository.

\n", + "comments": "", + "group": "submodule" + }, + "git_submodule_branch": { + "type": "function", + "file": "git2/submodule.h", + "line": 407, + "lineto": 407, + "args": [ + { + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_repository *repo", - "sig": "git_repository *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " 0 on success, \n<\n0 on failure" + "type": "const char *", + "comment": " Pointer to the submodule branch" }, - "description": "

Set a repository to be bare.

\n", - "comments": "

Clear the working directory and set core.bare to true. You may also want to call git_repository_set_index(repo, NULL) since a bare repo typically does not have an index, but this function will not do that for you.

\n", - "group": "repository" + "description": "

Get the branch for the submodule.

\n", + "comments": "", + "group": "submodule" }, - "git_repository_submodule_cache_all": { + "git_submodule_set_branch": { "type": "function", - "file": "sys/repository.h", - "line": 149, - "lineto": 150, + "file": "git2/submodule.h", + "line": 420, + "lineto": 420, "args": [ { "name": "repo", "type": "git_repository *", - "comment": "the repository whose submodules will be cached." + "comment": "the repository to affect" + }, + { + "name": "name", + "type": "const char *", + "comment": "the name of the submodule to configure" + }, + { + "name": "branch", + "type": "const char *", + "comment": "Branch that should be used for the submodule" } ], - "argline": "git_repository *repo", - "sig": "git_repository *", + "argline": "git_repository *repo, const char *name, const char *branch", + "sig": "git_repository *::const char *::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, \n<\n0 on failure" }, - "description": "

Load and cache all submodules.

\n", - "comments": "

Because the .gitmodules file is unstructured, loading submodules is an O(N) operation. Any operation (such as git_rebase_init) that requires accessing all submodules is O(N^2) in the number of submodules, if it has to look each one up individually. This function loads all submodules and caches them so that subsequent calls to git_submodule_lookup are O(1).

\n", - "group": "repository" + "description": "

Set the branch for the submodule in the configuration

\n", + "comments": "

After calling this, you may wish to call git_submodule_sync() to write the changes to the checked out submodule repository.

\n", + "group": "submodule" }, - "git_repository_submodule_cache_clear": { + "git_submodule_set_url": { "type": "function", - "file": "sys/repository.h", - "line": 164, - "lineto": 165, + "file": "git2/submodule.h", + "line": 434, + "lineto": 434, "args": [ { "name": "repo", "type": "git_repository *", - "comment": "the repository whose submodule cache will be cleared" + "comment": "the repository to affect" + }, + { + "name": "name", + "type": "const char *", + "comment": "the name of the submodule to configure" + }, + { + "name": "url", + "type": "const char *", + "comment": "URL that should be used for the submodule" } ], - "argline": "git_repository *repo", - "sig": "git_repository *", + "argline": "git_repository *repo, const char *name, const char *url", + "sig": "git_repository *::const char *::const char *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, \n<\n0 on failure" }, - "description": "

Clear the submodule cache.

\n", - "comments": "

Clear the submodule cache populated by git_repository_submodule_cache_all. If there is no cache, do nothing.

\n\n

The cache incorporates data from the repository's configuration, as well as the state of the working tree, the index, and HEAD. So any time any of these has changed, the cache might become invalid.

\n", - "group": "repository" + "description": "

Set the URL for the submodule in the configuration

\n", + "comments": "

After calling this, you may wish to call git_submodule_sync() to write the changes to the checked out submodule repository.

\n", + "group": "submodule" }, - "git_stream_register_tls": { + "git_submodule_index_id": { "type": "function", - "file": "sys/stream.h", - "line": 54, - "lineto": 54, + "file": "git2/submodule.h", + "line": 442, + "lineto": 442, "args": [ { - "name": "ctor", - "type": "git_stream_cb", - "comment": "the constructor to use" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_stream_cb ctor", - "sig": "git_stream_cb", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Register a TLS stream constructor for the library to use

\n", - "comments": "

If a constructor is already set, it will be overwritten. Pass NULL in order to deregister the current constructor.

\n", - "group": "stream" - }, - "git_time_monotonic": { - "type": "function", - "file": "sys/time.h", - "line": 27, - "lineto": 27, - "args": [], - "argline": "", - "sig": "", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "double", - "comment": null + "type": "const git_oid *", + "comment": " Pointer to git_oid or NULL if submodule is not in index." }, - "description": "

Return a monotonic time value, useful for measuring running time\n and setting up timeouts.

\n", - "comments": "

The returned value is an arbitrary point in time -- it can only be used when comparing it to another git_time_monotonic call.

\n\n

The time is returned in seconds, with a decimal fraction whose accuracy differs based on the underlying system, but it should be accurate to at least nanosecond resolution.

\n\n

This function cannot fail.

\n", - "group": "time" + "description": "

Get the OID for the submodule in the index.

\n", + "comments": "", + "group": "submodule" }, - "git_transport_init": { + "git_submodule_head_id": { "type": "function", - "file": "sys/transport.h", - "line": 119, - "lineto": 121, + "file": "git2/submodule.h", + "line": 450, + "lineto": 450, "args": [ { - "name": "opts", - "type": "git_transport *", - "comment": "the `git_transport` struct to initialize" - }, - { - "name": "version", - "type": "unsigned int", - "comment": "Version of struct; pass `GIT_TRANSPORT_VERSION`" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_transport *opts, unsigned int version", - "sig": "git_transport *::unsigned int", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " Zero on success; -1 on failure." + "type": "const git_oid *", + "comment": " Pointer to git_oid or NULL if submodule is not in the HEAD." }, - "description": "

Initializes a git_transport with default values. Equivalent to\n creating an instance with GIT_TRANSPORT_INIT.

\n", + "description": "

Get the OID for the submodule in the current HEAD tree.

\n", "comments": "", - "group": "transport" + "group": "submodule" }, - "git_transport_new": { + "git_submodule_wd_id": { "type": "function", - "file": "sys/transport.h", - "line": 133, - "lineto": 133, + "file": "git2/submodule.h", + "line": 463, + "lineto": 463, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": "The newly created transport (out)" - }, - { - "name": "owner", - "type": "git_remote *", - "comment": "The git_remote which will own this transport" - }, - { - "name": "url", - "type": "const char *", - "comment": "The URL to connect to" + "name": "submodule", + "type": "git_submodule *", + "comment": "Pointer to submodule object" } ], - "argline": "git_transport **out, git_remote *owner, const char *url", - "sig": "git_transport **::git_remote *::const char *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "const git_oid *", + "comment": " Pointer to git_oid or NULL if submodule is not checked out." }, - "description": "

Function to use to create a transport from a URL. The transport database\n is scanned to find a transport that implements the scheme of the URI (i.e.\n git:// or http://) and a transport object is returned to the caller.

\n", - "comments": "", - "group": "transport" + "description": "

Get the OID for the submodule in the current working directory.

\n", + "comments": "

This returns the OID that corresponds to looking up 'HEAD' in the checked out submodule. If there are pending changes in the index or anything else, this won't notice that. You should call git_submodule_status() for a more complete picture about the state of the working directory.

\n", + "group": "submodule" }, - "git_transport_ssh_with_paths": { + "git_submodule_ignore": { "type": "function", - "file": "sys/transport.h", - "line": 149, - "lineto": 149, + "file": "git2/submodule.h", + "line": 488, + "lineto": 489, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": "the resulting transport" - }, - { - "name": "owner", - "type": "git_remote *", - "comment": "the owning remote" - }, - { - "name": "payload", - "type": "void *", - "comment": "a strarray with the paths" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to check" } ], - "argline": "git_transport **out, git_remote *owner, void *payload", - "sig": "git_transport **::git_remote *::void *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "git_submodule_ignore_t", + "comment": " The current git_submodule_ignore_t valyue what will be used for\n this submodule." }, - "description": "

Create an ssh transport with custom git command paths

\n", - "comments": "

This is a factory function suitable for setting as the transport callback in a remote (or for a clone in the options).

\n\n

The payload argument must be a strarray pointer with the paths for the git-upload-pack and git-receive-pack at index 0 and 1.

\n", - "group": "transport" + "description": "

Get the ignore rule that will be used for the submodule.

\n", + "comments": "

These values control the behavior of git_submodule_status() for this submodule. There are four ignore values:

\n\n\n", + "group": "submodule" }, - "git_transport_register": { + "git_submodule_set_ignore": { "type": "function", - "file": "sys/transport.h", - "line": 164, - "lineto": 167, + "file": "git2/submodule.h", + "line": 501, + "lineto": 504, "args": [ { - "name": "prefix", - "type": "const char *", - "comment": "The scheme (ending in \"://\") to match, i.e. \"git://\"" + "name": "repo", + "type": "git_repository *", + "comment": "the repository to affect" }, { - "name": "cb", - "type": "git_transport_cb", - "comment": "The callback used to create an instance of the transport" + "name": "name", + "type": "const char *", + "comment": "the name of the submdule" }, { - "name": "param", - "type": "void *", - "comment": "A fixed parameter to pass to cb at creation time" + "name": "ignore", + "type": "git_submodule_ignore_t", + "comment": "The new value for the ignore rule" } ], - "argline": "const char *prefix, git_transport_cb cb, void *param", - "sig": "const char *::git_transport_cb::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Add a custom transport definition, to be used in addition to the built-in\n set of transports that come with libgit2.

\n", - "comments": "

The caller is responsible for synchronizing calls to git_transport_register and git_transport_unregister with other calls to the library that instantiate transports.

\n", - "group": "transport" + "argline": "git_repository *repo, const char *name, git_submodule_ignore_t ignore", + "sig": "git_repository *::const char *::git_submodule_ignore_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Set the ignore rule for the submodule in the configuration

\n", + "comments": "

This does not affect any currently-loaded instances.

\n", + "group": "submodule" }, - "git_transport_unregister": { + "git_submodule_update_strategy": { "type": "function", - "file": "sys/transport.h", - "line": 177, - "lineto": 178, + "file": "git2/submodule.h", + "line": 516, + "lineto": 517, "args": [ { - "name": "prefix", - "type": "const char *", - "comment": "From the previous call to git_transport_register" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to check" } ], - "argline": "const char *prefix", - "sig": "const char *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "git_submodule_update_t", + "comment": " The current git_submodule_update_t value that will be used\n for this submodule." }, - "description": "

Unregister a custom transport definition which was previously registered\n with git_transport_register.

\n", - "comments": "", - "group": "transport" + "description": "

Get the update rule that will be used for the submodule.

\n", + "comments": "

This value controls the behavior of the git submodule update command. There are four useful values documented with git_submodule_update_t.

\n", + "group": "submodule" }, - "git_transport_dummy": { + "git_submodule_set_update": { "type": "function", - "file": "sys/transport.h", - "line": 191, - "lineto": 194, + "file": "git2/submodule.h", + "line": 529, + "lineto": 532, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": "The newly created transport (out)" + "name": "repo", + "type": "git_repository *", + "comment": "the repository to affect" }, { - "name": "owner", - "type": "git_remote *", - "comment": "The git_remote which will own this transport" + "name": "name", + "type": "const char *", + "comment": "the name of the submodule to configure" }, { - "name": "payload", - "type": "void *", - "comment": "You must pass NULL for this parameter." + "name": "update", + "type": "git_submodule_update_t", + "comment": "The new value to use" } ], - "argline": "git_transport **out, git_remote *owner, void *payload", - "sig": "git_transport **::git_remote *::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create an instance of the dummy transport.

\n", - "comments": "", - "group": "transport" + "argline": "git_repository *repo, const char *name, git_submodule_update_t update", + "sig": "git_repository *::const char *::git_submodule_update_t", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Set the update rule for the submodule in the configuration

\n", + "comments": "

This setting won't affect any existing instances.

\n", + "group": "submodule" }, - "git_transport_local": { + "git_submodule_fetch_recurse_submodules": { "type": "function", - "file": "sys/transport.h", - "line": 204, - "lineto": 207, + "file": "git2/submodule.h", + "line": 546, + "lineto": 547, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": "The newly created transport (out)" - }, - { - "name": "owner", - "type": "git_remote *", - "comment": "The git_remote which will own this transport" - }, - { - "name": "payload", - "type": "void *", - "comment": "You must pass NULL for this parameter." + "name": "submodule", + "type": "git_submodule *", + "comment": "the submodule to examine" } ], - "argline": "git_transport **out, git_remote *owner, void *payload", - "sig": "git_transport **::git_remote *::void *", + "argline": "git_submodule *submodule", + "sig": "git_submodule *", "return": { - "type": "int", - "comment": " 0 or an error code" + "type": "git_submodule_recurse_t", + "comment": " the submodule recursion configuration" }, - "description": "

Create an instance of the local transport.

\n", - "comments": "", - "group": "transport" + "description": "

Read the fetchRecurseSubmodules rule for a submodule.

\n", + "comments": "

This accesses the submodule.<name>.fetchRecurseSubmodules value for the submodule that controls fetching behavior for the submodule.

\n\n

Note that at this time, libgit2 does not honor this setting and the fetch functionality currently ignores submodules.

\n", + "group": "submodule" }, - "git_transport_smart": { + "git_submodule_set_fetch_recurse_submodules": { "type": "function", - "file": "sys/transport.h", - "line": 217, - "lineto": 220, + "file": "git2/submodule.h", + "line": 559, + "lineto": 562, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": "The newly created transport (out)" + "name": "repo", + "type": "git_repository *", + "comment": "the repository to affect" }, { - "name": "owner", - "type": "git_remote *", - "comment": "The git_remote which will own this transport" + "name": "name", + "type": "const char *", + "comment": "the submodule to configure" }, { - "name": "payload", - "type": "void *", - "comment": "A pointer to a git_smart_subtransport_definition" + "name": "fetch_recurse_submodules", + "type": "git_submodule_recurse_t", + "comment": "the submodule recursion configuration" } ], - "argline": "git_transport **out, git_remote *owner, void *payload", - "sig": "git_transport **::git_remote *::void *", + "argline": "git_repository *repo, const char *name, git_submodule_recurse_t fetch_recurse_submodules", + "sig": "git_repository *::const char *::git_submodule_recurse_t", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " old value for fetchRecurseSubmodules" }, - "description": "

Create an instance of the smart transport.

\n", - "comments": "", - "group": "transport" + "description": "

Set the fetchRecurseSubmodules rule for a submodule in the configuration

\n", + "comments": "

This setting won't affect any existing instances.

\n", + "group": "submodule" }, - "git_transport_smart_certificate_check": { + "git_submodule_init": { "type": "function", - "file": "sys/transport.h", - "line": 231, - "lineto": 231, + "file": "git2/submodule.h", + "line": 577, + "lineto": 577, "args": [ { - "name": "transport", - "type": "git_transport *", - "comment": "a smart transport" - }, - { - "name": "cert", - "type": "git_cert *", - "comment": "the certificate to pass to the caller" - }, - { - "name": "valid", - "type": "int", - "comment": "whether we believe the certificate is valid" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to write into the superproject config" }, { - "name": "hostname", - "type": "const char *", - "comment": "the hostname we connected to" + "name": "overwrite", + "type": "int", + "comment": "By default, existing entries will not be overwritten,\n but setting this to true forces them to be updated." } ], - "argline": "git_transport *transport, git_cert *cert, int valid, const char *hostname", - "sig": "git_transport *::git_cert *::int::const char *", + "argline": "git_submodule *submodule, int overwrite", + "sig": "git_submodule *::int", "return": { "type": "int", - "comment": " the return value of the callback" + "comment": " 0 on success, \n<\n0 on failure." }, - "description": "

Call the certificate check for this transport.

\n", - "comments": "", - "group": "transport" + "description": "

Copy submodule info into ".git/config" file.

\n", + "comments": "

Just like "git submodule init", this copies information about the submodule into ".git/config". You can use the accessor functions above to alter the in-memory git_submodule object and control what is written to the config, overriding what is in .gitmodules.

\n", + "group": "submodule" }, - "git_transport_smart_credentials": { + "git_submodule_repo_init": { "type": "function", - "file": "sys/transport.h", - "line": 242, - "lineto": 242, + "file": "git2/submodule.h", + "line": 592, + "lineto": 595, "args": [ { "name": "out", - "type": "git_cred **", - "comment": "the pointer where the creds are to be stored" - }, - { - "name": "transport", - "type": "git_transport *", - "comment": "a smart transport" + "type": "git_repository **", + "comment": "Output pointer to the created git repository." }, { - "name": "user", - "type": "const char *", - "comment": "the user we saw on the url (if any)" + "name": "sm", + "type": "const git_submodule *", + "comment": "The submodule to create a new subrepository from." }, { - "name": "methods", + "name": "use_gitlink", "type": "int", - "comment": "available methods for authentication" + "comment": "Should the workdir contain a gitlink to\n the repo in .git/modules vs. repo directly in workdir." } ], - "argline": "git_cred **out, git_transport *transport, const char *user, int methods", - "sig": "git_cred **::git_transport *::const char *::int", + "argline": "git_repository **out, const git_submodule *sm, int use_gitlink", + "sig": "git_repository **::const git_submodule *::int", "return": { "type": "int", - "comment": " the return value of the callback" + "comment": " 0 on success, \n<\n0 on failure." }, - "description": "

Call the credentials callback for this transport

\n", - "comments": "", - "group": "transport" + "description": "

Set up the subrepository for a submodule in preparation for clone.

\n", + "comments": "

This function can be called to init and set up a submodule repository from a submodule in preparation to clone it from its remote.

\n", + "group": "submodule" }, - "git_transport_smart_proxy_options": { + "git_submodule_sync": { "type": "function", - "file": "sys/transport.h", - "line": 252, - "lineto": 252, + "file": "git2/submodule.h", + "line": 608, + "lineto": 608, "args": [ { - "name": "out", - "type": "git_proxy_options *", - "comment": "options struct to fill" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to copy." + } + ], + "argline": "git_submodule *submodule", + "sig": "git_submodule *", + "return": { "type": "int", "comment": " 0 or an error code." }, + "description": "

Copy submodule remote info into submodule repo.

\n", + "comments": "

This copies the information about the submodule's URL into the checked out submodule config, acting like "git submodule sync". This is useful if you have altered the URL for the submodule (or it has been altered by a fetch of upstream changes) and you need to update your local repo.

\n", + "group": "submodule" + }, + "git_submodule_open": { + "type": "function", + "file": "git2/submodule.h", + "line": 622, + "lineto": 624, + "args": [ + { + "name": "repo", + "type": "git_repository **", + "comment": "Pointer to the submodule repo which was opened" }, { - "name": "transport", - "type": "git_transport *", - "comment": "the transport to extract the data from." + "name": "submodule", + "type": "git_submodule *", + "comment": "Submodule to be opened" } ], - "argline": "git_proxy_options *out, git_transport *transport", - "sig": "git_proxy_options *::git_transport *", + "argline": "git_repository **repo, git_submodule *submodule", + "sig": "git_repository **::git_submodule *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, \n<\n0 if submodule repo could not be opened." }, - "description": "

Get a copy of the proxy options

\n", - "comments": "

The url is copied and must be freed by the caller.

\n", - "group": "transport" + "description": "

Open the repository for a submodule.

\n", + "comments": "

This is a newly opened repository object. The caller is responsible for calling git_repository_free() on it when done. Multiple calls to this function will return distinct git_repository objects. This will only work if the submodule is checked out into the working directory.

\n", + "group": "submodule" }, - "git_smart_subtransport_http": { + "git_submodule_reload": { "type": "function", - "file": "sys/transport.h", - "line": 362, - "lineto": 365, + "file": "git2/submodule.h", + "line": 636, + "lineto": 636, "args": [ { - "name": "out", - "type": "git_smart_subtransport **", - "comment": "The newly created subtransport" - }, - { - "name": "owner", - "type": "git_transport *", - "comment": "The smart transport to own this subtransport" + "name": "submodule", + "type": "git_submodule *", + "comment": "The submodule to reload" }, { - "name": "param", - "type": "void *", - "comment": null + "name": "force", + "type": "int", + "comment": "Force reload even if the data doesn't seem out of date" } ], - "argline": "git_smart_subtransport **out, git_transport *owner, void *param", - "sig": "git_smart_subtransport **::git_transport *::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create an instance of the http subtransport. This subtransport\n also supports https. On Win32, this subtransport may be implemented\n using the WinHTTP library.

\n", - "comments": "", - "group": "smart" + "argline": "git_submodule *submodule, int force", + "sig": "git_submodule *::int", + "return": { "type": "int", "comment": " 0 on success, \n<\n0 on error" }, + "description": "

Reread submodule info from config, index, and HEAD.

\n", + "comments": "

Call this to reread cached submodule information for this submodule if you have reason to believe that it has changed.

\n", + "group": "submodule" }, - "git_smart_subtransport_git": { + "git_submodule_status": { "type": "function", - "file": "sys/transport.h", - "line": 374, - "lineto": 377, + "file": "git2/submodule.h", + "line": 652, + "lineto": 656, "args": [ { - "name": "out", - "type": "git_smart_subtransport **", - "comment": "The newly created subtransport" + "name": "status", + "type": "unsigned int *", + "comment": "Combination of `GIT_SUBMODULE_STATUS` flags" }, { - "name": "owner", - "type": "git_transport *", - "comment": "The smart transport to own this subtransport" + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to look" }, { - "name": "param", - "type": "void *", - "comment": null + "name": "name", + "type": "const char *", + "comment": "name of the submodule" + }, + { + "name": "ignore", + "type": "git_submodule_ignore_t", + "comment": "the ignore rules to follow" } ], - "argline": "git_smart_subtransport **out, git_transport *owner, void *param", - "sig": "git_smart_subtransport **::git_transport *::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create an instance of the git subtransport.

\n", - "comments": "", - "group": "smart" + "argline": "unsigned int *status, git_repository *repo, const char *name, git_submodule_ignore_t ignore", + "sig": "unsigned int *::git_repository *::const char *::git_submodule_ignore_t", + "return": { "type": "int", "comment": " 0 on success, \n<\n0 on error" }, + "description": "

Get the status for a submodule.

\n", + "comments": "

This looks at a submodule and tries to determine the status. It will return a combination of the GIT_SUBMODULE_STATUS values above. How deeply it examines the working directory to do this will depend on the git_submodule_ignore_t value for the submodule.

\n", + "group": "submodule", + "examples": { + "status.c": ["ex/v1.9.1/status.html#git_submodule_status-22"] + } }, - "git_smart_subtransport_ssh": { + "git_submodule_location": { "type": "function", - "file": "sys/transport.h", - "line": 386, - "lineto": 389, + "file": "git2/submodule.h", + "line": 672, + "lineto": 674, "args": [ { - "name": "out", - "type": "git_smart_subtransport **", - "comment": "The newly created subtransport" - }, - { - "name": "owner", - "type": "git_transport *", - "comment": "The smart transport to own this subtransport" + "name": "location_status", + "type": "unsigned int *", + "comment": "Combination of first four `GIT_SUBMODULE_STATUS` flags" }, { - "name": "param", - "type": "void *", - "comment": null + "name": "submodule", + "type": "git_submodule *", + "comment": "Submodule for which to get status" } ], - "argline": "git_smart_subtransport **out, git_transport *owner, void *param", - "sig": "git_smart_subtransport **::git_transport *::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Create an instance of the ssh subtransport.

\n", - "comments": "", - "group": "smart" + "argline": "unsigned int *location_status, git_submodule *submodule", + "sig": "unsigned int *::git_submodule *", + "return": { "type": "int", "comment": " 0 on success, \n<\n0 on error" }, + "description": "

Get the locations of submodule information.

\n", + "comments": "

This is a bit like a very lightweight version of git_submodule_status. It just returns a value made of the first four submodule status values (i.e. the ones like GIT_SUBMODULE_STATUS_IN_HEAD, etc) that tell you where the submodule data comes from (i.e. the HEAD commit, gitmodules file, etc.). This can be useful if you want to know if the submodule is present in the working directory at this point in time, etc.

\n", + "group": "submodule" }, "git_tag_lookup": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 33, "lineto": 34, "args": [ @@ -22872,22 +21466,15 @@ ], "argline": "git_tag **out, git_repository *repo, const git_oid *id", "sig": "git_tag **::git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a tag object from the repository.

\n", "comments": "", "group": "tag", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_tag_lookup-84" - ] - } + "examples": { "general.c": ["ex/v1.9.1/general.html#git_tag_lookup-91"] } }, "git_tag_lookup_prefix": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 48, "lineto": 49, "args": [ @@ -22914,44 +21501,30 @@ ], "argline": "git_tag **out, git_repository *repo, const git_oid *id, size_t len", "sig": "git_tag **::git_repository *::const git_oid *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a tag object from the repository,\n given a prefix of its identifier (short id).

\n", "comments": "", "group": "tag" }, "git_tag_free": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 61, "lineto": 61, "args": [ - { - "name": "tag", - "type": "git_tag *", - "comment": "the tag to close" - } + { "name": "tag", "type": "git_tag *", "comment": "the tag to close" } ], "argline": "git_tag *tag", "sig": "git_tag *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open tag

\n", "comments": "

You can no longer use the git_tag pointer after this call.

\n\n

IMPORTANT: You MUST call this method when you are through with a tag to release memory. Failure to do so will cause a memory leak.

\n", "group": "tag", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_tag_free-85" - ] - } + "examples": { "general.c": ["ex/v1.9.1/general.html#git_tag_free-92"] } }, "git_tag_id": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 69, "lineto": 69, "args": [ @@ -22973,7 +21546,7 @@ }, "git_tag_owner": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 77, "lineto": 77, "args": [ @@ -22995,7 +21568,7 @@ }, "git_tag_target": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 89, "lineto": 89, "args": [ @@ -23012,22 +21585,15 @@ ], "argline": "git_object **target_out, const git_tag *tag", "sig": "git_object **::const git_tag *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Get the tagged object of a tag

\n", "comments": "

This method performs a repository lookup for the given object and returns it

\n", "group": "tag", - "examples": { - "general.c": [ - "ex/HEAD/general.html#git_tag_target-86" - ] - } + "examples": { "general.c": ["ex/v1.9.1/general.html#git_tag_target-93"] } }, "git_tag_target_id": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 97, "lineto": 97, "args": [ @@ -23039,22 +21605,17 @@ ], "argline": "const git_tag *tag", "sig": "const git_tag *", - "return": { - "type": "const git_oid *", - "comment": " pointer to the OID" - }, + "return": { "type": "const git_oid *", "comment": " pointer to the OID" }, "description": "

Get the OID of the tagged object of a tag

\n", "comments": "", "group": "tag", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tag_target_id-35" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tag_target_id-31"] } }, "git_tag_target_type": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 105, "lineto": 105, "args": [ @@ -23067,24 +21628,20 @@ "argline": "const git_tag *tag", "sig": "const git_tag *", "return": { - "type": "git_otype", + "type": "git_object_t", "comment": " type of the tagged object" }, "description": "

Get the type of a tag's tagged object

\n", "comments": "", "group": "tag", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tag_target_type-36" - ], - "general.c": [ - "ex/HEAD/general.html#git_tag_target_type-87" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tag_target_type-32"], + "general.c": ["ex/v1.9.1/general.html#git_tag_target_type-94"] } }, "git_tag_name": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 113, "lineto": 113, "args": [ @@ -23096,28 +21653,19 @@ ], "argline": "const git_tag *tag", "sig": "const git_tag *", - "return": { - "type": "const char *", - "comment": " name of the tag" - }, + "return": { "type": "const char *", "comment": " name of the tag" }, "description": "

Get the name of a tag

\n", "comments": "", "group": "tag", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tag_name-37" - ], - "general.c": [ - "ex/HEAD/general.html#git_tag_name-88" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_tag_name-20" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tag_name-33"], + "general.c": ["ex/v1.9.1/general.html#git_tag_name-95"], + "tag.c": ["ex/v1.9.1/tag.html#git_tag_name-16"] } }, "git_tag_tagger": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 121, "lineto": 121, "args": [ @@ -23137,14 +21685,12 @@ "comments": "", "group": "tag", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tag_tagger-38" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tag_tagger-34"] } }, "git_tag_message": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 129, "lineto": 129, "args": [ @@ -23165,20 +21711,16 @@ "group": "tag", "examples": { "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tag_message-39", - "ex/HEAD/cat-file.html#git_tag_message-40" + "ex/v1.9.1/cat-file.html#git_tag_message-35", + "ex/v1.9.1/cat-file.html#git_tag_message-36" ], - "general.c": [ - "ex/HEAD/general.html#git_tag_message-89" - ], - "tag.c": [ - "ex/HEAD/tag.html#git_tag_message-21" - ] + "general.c": ["ex/v1.9.1/general.html#git_tag_message-96"], + "tag.c": ["ex/v1.9.1/tag.html#git_tag_message-17"] } }, "git_tag_create": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 171, "lineto": 178, "args": [ @@ -23227,15 +21769,11 @@ "description": "

Create a new tag in the repository from an object

\n", "comments": "

A new reference will also be created pointing to this tag object. If force is true and a reference already exists with the given name, it'll be replaced.

\n\n

The message will not be cleaned up. This can be achieved through git_message_prettify().

\n\n

The tag name will be checked for validity. You must avoid the characters '~', '^', ':', '\\', '?', '[', and '*', and the sequences ".." and "@{" which have special meaning to revparse.

\n", "group": "tag", - "examples": { - "tag.c": [ - "ex/HEAD/tag.html#git_tag_create-22" - ] - } + "examples": { "tag.c": ["ex/v1.9.1/tag.html#git_tag_create-18"] } }, "git_tag_annotation_create": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 203, "lineto": 209, "args": [ @@ -23272,17 +21810,14 @@ ], "argline": "git_oid *oid, git_repository *repo, const char *tag_name, const git_object *target, const git_signature *tagger, const char *message", "sig": "git_oid *::git_repository *::const char *::const git_object *::const git_signature *::const char *", - "return": { - "type": "int", - "comment": " 0 on success or an error code" - }, + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Create a new tag in the object database pointing to a git_object

\n", "comments": "

The message will not be cleaned up. This can be achieved through git_message_prettify().

\n", "group": "tag" }, - "git_tag_create_frombuffer": { + "git_tag_create_from_buffer": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 220, "lineto": 224, "args": [ @@ -23296,16 +21831,8 @@ "type": "git_repository *", "comment": "Repository where to store the tag" }, - { - "name": "buffer", - "type": "const char *", - "comment": "Raw tag data" - }, - { - "name": "force", - "type": "int", - "comment": "Overwrite existing tags" - } + { "name": "buffer", "type": "const char *", "comment": "Raw tag data" }, + { "name": "force", "type": "int", "comment": "Overwrite existing tags" } ], "argline": "git_oid *oid, git_repository *repo, const char *buffer, int force", "sig": "git_oid *::git_repository *::const char *::int", @@ -23319,7 +21846,7 @@ }, "git_tag_create_lightweight": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 256, "lineto": 261, "args": [ @@ -23359,14 +21886,12 @@ "comments": "

A new direct reference will be created pointing to this target object. If force is true and a reference already exists with the given name, it'll be replaced.

\n\n

The tag name will be checked for validity. See git_tag_create() for rules about valid names.

\n", "group": "tag", "examples": { - "tag.c": [ - "ex/HEAD/tag.html#git_tag_create_lightweight-23" - ] + "tag.c": ["ex/v1.9.1/tag.html#git_tag_create_lightweight-19"] } }, "git_tag_delete": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 276, "lineto": 278, "args": [ @@ -23390,15 +21915,11 @@ "description": "

Delete an existing tag reference.

\n", "comments": "

The tag name will be checked for validity. See git_tag_create() for rules about valid names.

\n", "group": "tag", - "examples": { - "tag.c": [ - "ex/HEAD/tag.html#git_tag_delete-24" - ] - } + "examples": { "tag.c": ["ex/v1.9.1/tag.html#git_tag_delete-20"] } }, "git_tag_list": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 293, "lineto": 295, "args": [ @@ -23415,17 +21936,14 @@ ], "argline": "git_strarray *tag_names, git_repository *repo", "sig": "git_strarray *::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Fill a list with all the tags in the Repository

\n", "comments": "

The string array will be filled with the names of the matching tags; these values are owned by the user and should be free'd manually when no longer needed, using git_strarray_free.

\n", "group": "tag" }, "git_tag_list_match": { "type": "function", - "file": "tag.h", + "file": "git2/tag.h", "line": 315, "lineto": 318, "args": [ @@ -23447,30 +21965,19 @@ ], "argline": "git_strarray *tag_names, const char *pattern, git_repository *repo", "sig": "git_strarray *::const char *::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Fill a list with all the tags in the Repository\n which name match a defined pattern

\n", "comments": "

If an empty pattern is provided, all the tags will be returned.

\n\n

The string array will be filled with the names of the matching tags; these values are owned by the user and should be free'd manually when no longer needed, using git_strarray_free.

\n", "group": "tag", - "examples": { - "tag.c": [ - "ex/HEAD/tag.html#git_tag_list_match-25" - ] - } + "examples": { "tag.c": ["ex/v1.9.1/tag.html#git_tag_list_match-21"] } }, "git_tag_foreach": { "type": "function", - "file": "tag.h", - "line": 330, - "lineto": 333, + "file": "git2/tag.h", + "line": 340, + "lineto": 343, "args": [ - { - "name": "repo", - "type": "git_repository *", - "comment": "Repository" - }, + { "name": "repo", "type": "git_repository *", "comment": "Repository" }, { "name": "callback", "type": "git_tag_foreach_cb", @@ -23484,19 +21991,16 @@ ], "argline": "git_repository *repo, git_tag_foreach_cb callback, void *payload", "sig": "git_repository *::git_tag_foreach_cb::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success or an error code" }, "description": "

Call callback `cb' for each tag in the repository

\n", "comments": "", "group": "tag" }, "git_tag_peel": { "type": "function", - "file": "tag.h", - "line": 346, - "lineto": 348, + "file": "git2/tag.h", + "line": 356, + "lineto": 358, "args": [ { "name": "tag_target_out", @@ -23511,19 +22015,16 @@ ], "argline": "git_object **tag_target_out, const git_tag *tag", "sig": "git_object **::const git_tag *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Recursively peel a tag until a non tag git_object is found

\n", "comments": "

The retrieved tag_target object is owned by the repository and should be closed with the git_object_free method.

\n", "group": "tag" }, "git_tag_dup": { "type": "function", - "file": "tag.h", - "line": 357, - "lineto": 357, + "file": "git2/tag.h", + "line": 368, + "lineto": 368, "args": [ { "name": "out", @@ -23536,21 +22037,42 @@ "comment": "Original tag to copy" } ], - "argline": "git_tag **out, git_tag *source", - "sig": "git_tag **::git_tag *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Create an in-memory copy of a tag. The copy must be explicitly\n free'd or it will leak.

\n", + "argline": "git_tag **out, git_tag *source", + "sig": "git_tag **::git_tag *", + "return": { "type": "int", "comment": " 0" }, + "description": "

Create an in-memory copy of a tag. The copy must be explicitly\n free'd or it will leak.

\n", + "comments": "", + "group": "tag" + }, + "git_tag_name_is_valid": { + "type": "function", + "file": "git2/tag.h", + "line": 380, + "lineto": 380, + "args": [ + { + "name": "valid", + "type": "int *", + "comment": "output pointer to set with validity of given tag name" + }, + { + "name": "name", + "type": "const char *", + "comment": "a tag name to test" + } + ], + "argline": "int *valid, const char *name", + "sig": "int *::const char *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Determine whether a tag name is valid, meaning that (when prefixed\n with refs/tags/) that it is a valid reference name, and that any\n additional tag name restrictions are imposed (eg, it cannot start\n with a -).

\n", "comments": "", "group": "tag" }, "git_trace_set": { "type": "function", - "file": "trace.h", - "line": 63, - "lineto": 63, + "file": "git2/trace.h", + "line": 68, + "lineto": 68, "args": [ { "name": "level", @@ -23559,343 +22081,249 @@ }, { "name": "cb", - "type": "git_trace_callback", + "type": "git_trace_cb", "comment": "Function to call with trace data" } ], - "argline": "git_trace_level_t level, git_trace_callback cb", - "sig": "git_trace_level_t::git_trace_callback", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "argline": "git_trace_level_t level, git_trace_cb cb", + "sig": "git_trace_level_t::git_trace_cb", + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Sets the system tracing configuration to the specified level with the\n specified callback. When system events occur at a level equal to, or\n lower than, the given level they will be reported to the given callback.

\n", "comments": "", "group": "trace" }, - "git_cred_has_username": { + "git_transaction_new": { "type": "function", - "file": "transport.h", - "line": 190, - "lineto": 190, + "file": "git2/transaction.h", + "line": 32, + "lineto": 32, "args": [ { - "name": "cred", - "type": "git_cred *", - "comment": "object to check" + "name": "out", + "type": "git_transaction **", + "comment": "the resulting transaction" + }, + { + "name": "repo", + "type": "git_repository *", + "comment": "the repository in which to lock" } ], - "argline": "git_cred *cred", - "sig": "git_cred *", - "return": { - "type": "int", - "comment": " 1 if the credential object has non-NULL username, 0 otherwise" - }, - "description": "

Check whether a credential object contains username information.

\n", - "comments": "", - "group": "cred" + "argline": "git_transaction **out, git_repository *repo", + "sig": "git_transaction **::git_repository *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Create a new transaction object

\n", + "comments": "

This does not lock anything, but sets up the transaction object to know from which repository to lock.

\n", + "group": "transaction" }, - "git_cred_userpass_plaintext_new": { + "git_transaction_lock_ref": { "type": "function", - "file": "transport.h", - "line": 201, - "lineto": 204, + "file": "git2/transaction.h", + "line": 44, + "lineto": 44, "args": [ { - "name": "out", - "type": "git_cred **", - "comment": "The newly created credential object." - }, - { - "name": "username", - "type": "const char *", - "comment": "The username of the credential." + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" }, { - "name": "password", + "name": "refname", "type": "const char *", - "comment": "The password of the credential." + "comment": "the reference to lock" } ], - "argline": "git_cred **out, const char *username, const char *password", - "sig": "git_cred **::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 for success or an error code for failure" - }, - "description": "

Create a new plain-text username and password credential object.\n The supplied credential parameter will be internally duplicated.

\n", - "comments": "", - "group": "cred" + "argline": "git_transaction *tx, const char *refname", + "sig": "git_transaction *::const char *", + "return": { "type": "int", "comment": " 0 or an error message" }, + "description": "

Lock a reference

\n", + "comments": "

Lock the specified reference. This is the first step to updating a reference.

\n", + "group": "transaction" }, - "git_cred_ssh_key_new": { + "git_transaction_set_target": { "type": "function", - "file": "transport.h", - "line": 217, - "lineto": 222, + "file": "git2/transaction.h", + "line": 59, + "lineto": 59, "args": [ { - "name": "out", - "type": "git_cred **", - "comment": "The newly created credential object." + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" }, { - "name": "username", + "name": "refname", "type": "const char *", - "comment": "username to use to authenticate" + "comment": "reference to update" }, { - "name": "publickey", - "type": "const char *", - "comment": "The path to the public key of the credential." + "name": "target", + "type": "const git_oid *", + "comment": "target to set the reference to" }, { - "name": "privatekey", - "type": "const char *", - "comment": "The path to the private key of the credential." + "name": "sig", + "type": "const git_signature *", + "comment": "signature to use in the reflog; pass NULL to read the identity from the config" }, { - "name": "passphrase", + "name": "msg", "type": "const char *", - "comment": "The passphrase of the credential." + "comment": "message to use in the reflog" } ], - "argline": "git_cred **out, const char *username, const char *publickey, const char *privatekey, const char *passphrase", - "sig": "git_cred **::const char *::const char *::const char *::const char *", + "argline": "git_transaction *tx, const char *refname, const git_oid *target, const git_signature *sig, const char *msg", + "sig": "git_transaction *::const char *::const git_oid *::const git_signature *::const char *", "return": { "type": "int", - "comment": " 0 for success or an error code for failure" + "comment": " 0, GIT_ENOTFOUND if the reference is not among the locked ones, or an error code" }, - "description": "

Create a new passphrase-protected ssh key credential object.\n The supplied credential parameter will be internally duplicated.

\n", - "comments": "", - "group": "cred" + "description": "

Set the target of a reference

\n", + "comments": "

Set the target of the specified reference. This reference must be locked.

\n", + "group": "transaction" }, - "git_cred_ssh_interactive_new": { + "git_transaction_set_symbolic_target": { "type": "function", - "file": "transport.h", - "line": 233, - "lineto": 237, + "file": "git2/transaction.h", + "line": 74, + "lineto": 74, "args": [ { - "name": "out", - "type": "git_cred **", - "comment": null + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" }, { - "name": "username", + "name": "refname", "type": "const char *", - "comment": "Username to use to authenticate." + "comment": "reference to update" }, { - "name": "prompt_callback", - "type": "git_cred_ssh_interactive_callback", - "comment": "The callback method used for prompts." + "name": "target", + "type": "const char *", + "comment": "target to set the reference to" }, { - "name": "payload", - "type": "void *", - "comment": "Additional data to pass to the callback." - } - ], - "argline": "git_cred **out, const char *username, git_cred_ssh_interactive_callback prompt_callback, void *payload", - "sig": "git_cred **::const char *::git_cred_ssh_interactive_callback::void *", - "return": { - "type": "int", - "comment": " 0 for success or an error code for failure." - }, - "description": "

Create a new ssh keyboard-interactive based credential object.\n The supplied credential parameter will be internally duplicated.

\n", - "comments": "", - "group": "cred" - }, - "git_cred_ssh_key_from_agent": { - "type": "function", - "file": "transport.h", - "line": 247, - "lineto": 249, - "args": [ - { - "name": "out", - "type": "git_cred **", - "comment": "The newly created credential object." + "name": "sig", + "type": "const git_signature *", + "comment": "signature to use in the reflog; pass NULL to read the identity from the config" }, { - "name": "username", + "name": "msg", "type": "const char *", - "comment": "username to use to authenticate" + "comment": "message to use in the reflog" } ], - "argline": "git_cred **out, const char *username", - "sig": "git_cred **::const char *", + "argline": "git_transaction *tx, const char *refname, const char *target, const git_signature *sig, const char *msg", + "sig": "git_transaction *::const char *::const char *::const git_signature *::const char *", "return": { "type": "int", - "comment": " 0 for success or an error code for failure" + "comment": " 0, GIT_ENOTFOUND if the reference is not among the locked ones, or an error code" }, - "description": "

Create a new ssh key credential object used for querying an ssh-agent.\n The supplied credential parameter will be internally duplicated.

\n", - "comments": "", - "group": "cred" + "description": "

Set the target of a reference

\n", + "comments": "

Set the target of the specified reference. This reference must be locked.

\n", + "group": "transaction" }, - "git_cred_ssh_custom_new": { + "git_transaction_set_reflog": { "type": "function", - "file": "transport.h", - "line": 269, - "lineto": 275, + "file": "git2/transaction.h", + "line": 87, + "lineto": 87, "args": [ { - "name": "out", - "type": "git_cred **", - "comment": "The newly created credential object." - }, - { - "name": "username", - "type": "const char *", - "comment": "username to use to authenticate" + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" }, { - "name": "publickey", + "name": "refname", "type": "const char *", - "comment": "The bytes of the public key." - }, - { - "name": "publickey_len", - "type": "size_t", - "comment": "The length of the public key in bytes." - }, - { - "name": "sign_callback", - "type": "git_cred_sign_callback", - "comment": "The callback method to sign the data during the challenge." + "comment": "the reference whose reflog to set" }, { - "name": "payload", - "type": "void *", - "comment": "Additional data to pass to the callback." - } - ], - "argline": "git_cred **out, const char *username, const char *publickey, size_t publickey_len, git_cred_sign_callback sign_callback, void *payload", - "sig": "git_cred **::const char *::const char *::size_t::git_cred_sign_callback::void *", - "return": { - "type": "int", - "comment": " 0 for success or an error code for failure" - }, - "description": "

Create an ssh key credential with a custom signing function.

\n", - "comments": "

This lets you use your own function to sign the challenge.

\n\n

This function and its credential type is provided for completeness and wraps libssh2_userauth_publickey(), which is undocumented.

\n\n

The supplied credential parameter will be internally duplicated.

\n", - "group": "cred" - }, - "git_cred_default_new": { - "type": "function", - "file": "transport.h", - "line": 283, - "lineto": 283, - "args": [ - { - "name": "out", - "type": "git_cred **", - "comment": null + "name": "reflog", + "type": "const git_reflog *", + "comment": "the reflog as it should be written out" } ], - "argline": "git_cred **out", - "sig": "git_cred **", + "argline": "git_transaction *tx, const char *refname, const git_reflog *reflog", + "sig": "git_transaction *::const char *::const git_reflog *", "return": { "type": "int", - "comment": " 0 for success or an error code for failure" + "comment": " 0, GIT_ENOTFOUND if the reference is not among the locked ones, or an error code" }, - "description": "

Create a "default" credential usable for Negotiate mechanisms like NTLM\n or Kerberos authentication.

\n", - "comments": "", - "group": "cred" + "description": "

Set the reflog of a reference

\n", + "comments": "

Set the specified reference's reflog. If this is combined with setting the target, that update won't be written to the reflog.

\n", + "group": "transaction" }, - "git_cred_username_new": { + "git_transaction_remove": { "type": "function", - "file": "transport.h", - "line": 291, - "lineto": 291, + "file": "git2/transaction.h", + "line": 96, + "lineto": 96, "args": [ { - "name": "cred", - "type": "git_cred **", - "comment": null + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" }, { - "name": "username", + "name": "refname", "type": "const char *", - "comment": null + "comment": "the reference to remove" } ], - "argline": "git_cred **cred, const char *username", - "sig": "git_cred **::const char *", + "argline": "git_transaction *tx, const char *refname", + "sig": "git_transaction *::const char *", "return": { "type": "int", - "comment": null + "comment": " 0, GIT_ENOTFOUND if the reference is not among the locked ones, or an error code" }, - "description": "

Create a credential to specify a username.

\n", - "comments": "

This is used with ssh authentication to query for the username if none is specified in the url.

\n", - "group": "cred" + "description": "

Remove a reference

\n", + "comments": "", + "group": "transaction" }, - "git_cred_ssh_key_memory_new": { + "git_transaction_commit": { "type": "function", - "file": "transport.h", - "line": 303, - "lineto": 308, + "file": "git2/transaction.h", + "line": 107, + "lineto": 107, "args": [ { - "name": "out", - "type": "git_cred **", - "comment": "The newly created credential object." - }, - { - "name": "username", - "type": "const char *", - "comment": "username to use to authenticate." - }, - { - "name": "publickey", - "type": "const char *", - "comment": "The public key of the credential." - }, - { - "name": "privatekey", - "type": "const char *", - "comment": "The private key of the credential." - }, - { - "name": "passphrase", - "type": "const char *", - "comment": "The passphrase of the credential." + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" } ], - "argline": "git_cred **out, const char *username, const char *publickey, const char *privatekey, const char *passphrase", - "sig": "git_cred **::const char *::const char *::const char *::const char *", - "return": { - "type": "int", - "comment": " 0 for success or an error code for failure" - }, - "description": "

Create a new ssh key credential object reading the keys from memory.

\n", - "comments": "", - "group": "cred" + "argline": "git_transaction *tx", + "sig": "git_transaction *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Commit the changes from the transaction

\n", + "comments": "

Perform the changes that have been queued. The updates will be made one by one, and the first failure will stop the processing.

\n", + "group": "transaction" }, - "git_cred_free": { + "git_transaction_free": { "type": "function", - "file": "transport.h", - "line": 319, - "lineto": 319, + "file": "git2/transaction.h", + "line": 117, + "lineto": 117, "args": [ { - "name": "cred", - "type": "git_cred *", - "comment": "the object to free" + "name": "tx", + "type": "git_transaction *", + "comment": "the transaction" } ], - "argline": "git_cred *cred", - "sig": "git_cred *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Free a credential.

\n", - "comments": "

This is only necessary if you own the object; that is, if you are a transport.

\n", - "group": "cred" + "argline": "git_transaction *tx", + "sig": "git_transaction *", + "return": { "type": "void", "comment": null }, + "description": "

Free the resources allocated by this transaction

\n", + "comments": "

If any references remain locked, they will be unlocked without any changes made to them.

\n", + "group": "transaction" }, "git_tree_lookup": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 32, "lineto": 33, "args": [ @@ -23917,29 +22345,23 @@ ], "argline": "git_tree **out, git_repository *repo, const git_oid *id", "sig": "git_tree **::git_repository *::const git_oid *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a tree object from the repository.

\n", "comments": "", "group": "tree", "examples": { + "commit.c": ["ex/v1.9.1/commit.html#git_tree_lookup-13"], "general.c": [ - "ex/HEAD/general.html#git_tree_lookup-90", - "ex/HEAD/general.html#git_tree_lookup-91" - ], - "init.c": [ - "ex/HEAD/init.html#git_tree_lookup-14" + "ex/v1.9.1/general.html#git_tree_lookup-97", + "ex/v1.9.1/general.html#git_tree_lookup-98" ], - "merge.c": [ - "ex/HEAD/merge.html#git_tree_lookup-46" - ] + "init.c": ["ex/v1.9.1/init.html#git_tree_lookup-13"], + "merge.c": ["ex/v1.9.1/merge.html#git_tree_lookup-37"] } }, "git_tree_lookup_prefix": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 47, "lineto": 51, "args": [ @@ -23966,59 +22388,48 @@ ], "argline": "git_tree **out, git_repository *repo, const git_oid *id, size_t len", "sig": "git_tree **::git_repository *::const git_oid *::size_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a tree object from the repository,\n given a prefix of its identifier (short id).

\n", "comments": "", "group": "tree" }, "git_tree_free": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 63, "lineto": 63, "args": [ - { - "name": "tree", - "type": "git_tree *", - "comment": "The tree to close" - } + { "name": "tree", "type": "git_tree *", "comment": "The tree to close" } ], "argline": "git_tree *tree", "sig": "git_tree *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Close an open tree

\n", "comments": "

You can no longer use the git_tree pointer after this call.

\n\n

IMPORTANT: You MUST call this method when you stop using a tree to release memory. Failure to do so will cause a memory leak.

\n", "group": "tree", "examples": { + "commit.c": ["ex/v1.9.1/commit.html#git_tree_free-14"], "diff.c": [ - "ex/HEAD/diff.html#git_tree_free-17", - "ex/HEAD/diff.html#git_tree_free-18" + "ex/v1.9.1/diff.html#git_tree_free-19", + "ex/v1.9.1/diff.html#git_tree_free-20" ], "general.c": [ - "ex/HEAD/general.html#git_tree_free-92", - "ex/HEAD/general.html#git_tree_free-93" - ], - "init.c": [ - "ex/HEAD/init.html#git_tree_free-15" + "ex/v1.9.1/general.html#git_tree_free-99", + "ex/v1.9.1/general.html#git_tree_free-100" ], + "init.c": ["ex/v1.9.1/init.html#git_tree_free-14"], "log.c": [ - "ex/HEAD/log.html#git_tree_free-59", - "ex/HEAD/log.html#git_tree_free-60", - "ex/HEAD/log.html#git_tree_free-61", - "ex/HEAD/log.html#git_tree_free-62", - "ex/HEAD/log.html#git_tree_free-63" + "ex/v1.9.1/log.html#git_tree_free-55", + "ex/v1.9.1/log.html#git_tree_free-56", + "ex/v1.9.1/log.html#git_tree_free-57", + "ex/v1.9.1/log.html#git_tree_free-58", + "ex/v1.9.1/log.html#git_tree_free-59" ] } }, "git_tree_id": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 71, "lineto": 71, "args": [ @@ -24040,7 +22451,7 @@ }, "git_tree_owner": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 79, "lineto": 79, "args": [ @@ -24062,7 +22473,7 @@ }, "git_tree_entrycount": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 87, "lineto": 87, "args": [ @@ -24082,17 +22493,13 @@ "comments": "", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entrycount-41" - ], - "general.c": [ - "ex/HEAD/general.html#git_tree_entrycount-94" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entrycount-37"], + "general.c": ["ex/v1.9.1/general.html#git_tree_entrycount-101"] } }, "git_tree_entry_byname": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 99, "lineto": 100, "args": [ @@ -24117,14 +22524,12 @@ "comments": "

This returns a git_tree_entry that is owned by the git_tree. You don't have to free it, but you must not use it after the git_tree is released.

\n", "group": "tree", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_tree_entry_byname-95" - ] + "general.c": ["ex/v1.9.1/general.html#git_tree_entry_byname-102"] } }, "git_tree_entry_byindex": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 112, "lineto": 113, "args": [ @@ -24149,17 +22554,13 @@ "comments": "

This returns a git_tree_entry that is owned by the git_tree. You don't have to free it, but you must not use it after the git_tree is released.

\n", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entry_byindex-42" - ], - "general.c": [ - "ex/HEAD/general.html#git_tree_entry_byindex-96" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entry_byindex-38"], + "general.c": ["ex/v1.9.1/general.html#git_tree_entry_byindex-103"] } }, "git_tree_entry_byid": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 127, "lineto": 128, "args": [ @@ -24186,7 +22587,7 @@ }, "git_tree_entry_bypath": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 142, "lineto": 145, "args": [ @@ -24218,7 +22619,7 @@ }, "git_tree_entry_dup": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 157, "lineto": 157, "args": [ @@ -24235,17 +22636,14 @@ ], "argline": "git_tree_entry **dest, const git_tree_entry *source", "sig": "git_tree_entry **::const git_tree_entry *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Duplicate a tree entry

\n", "comments": "

Create a copy of a tree entry. The returned copy is owned by the user, and must be freed explicitly with git_tree_entry_free().

\n", "group": "tree" }, "git_tree_entry_free": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 168, "lineto": 168, "args": [ @@ -24257,17 +22655,14 @@ ], "argline": "git_tree_entry *entry", "sig": "git_tree_entry *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a user-owned tree entry

\n", "comments": "

IMPORTANT: This function is only needed for tree entries owned by the user, such as the ones returned by git_tree_entry_dup() or git_tree_entry_bypath().

\n", "group": "tree" }, "git_tree_entry_name": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 176, "lineto": 176, "args": [ @@ -24279,26 +22674,21 @@ ], "argline": "const git_tree_entry *entry", "sig": "const git_tree_entry *", - "return": { - "type": "const char *", - "comment": " the name of the file" - }, + "return": { "type": "const char *", "comment": " the name of the file" }, "description": "

Get the filename of a tree entry

\n", "comments": "", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entry_name-43" - ], + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entry_name-39"], "general.c": [ - "ex/HEAD/general.html#git_tree_entry_name-97", - "ex/HEAD/general.html#git_tree_entry_name-98" + "ex/v1.9.1/general.html#git_tree_entry_name-104", + "ex/v1.9.1/general.html#git_tree_entry_name-105" ] } }, "git_tree_entry_id": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 184, "lineto": 184, "args": [ @@ -24318,14 +22708,12 @@ "comments": "", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entry_id-44" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entry_id-40"] } }, "git_tree_entry_type": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 192, "lineto": 192, "args": [ @@ -24338,21 +22726,19 @@ "argline": "const git_tree_entry *entry", "sig": "const git_tree_entry *", "return": { - "type": "git_otype", + "type": "git_object_t", "comment": " the type of the pointed object" }, "description": "

Get the type of the object pointed by the entry

\n", "comments": "", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entry_type-45" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entry_type-41"] } }, "git_tree_entry_filemode": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 200, "lineto": 200, "args": [ @@ -24372,14 +22758,12 @@ "comments": "", "group": "tree", "examples": { - "cat-file.c": [ - "ex/HEAD/cat-file.html#git_tree_entry_filemode-46" - ] + "cat-file.c": ["ex/v1.9.1/cat-file.html#git_tree_entry_filemode-42"] } }, "git_tree_entry_filemode_raw": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 212, "lineto": 212, "args": [ @@ -24401,7 +22785,7 @@ }, "git_tree_entry_cmp": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 220, "lineto": 220, "args": [ @@ -24428,7 +22812,7 @@ }, "git_tree_entry_to_object": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 232, "lineto": 235, "args": [ @@ -24450,22 +22834,17 @@ ], "argline": "git_object **object_out, git_repository *repo, const git_tree_entry *entry", "sig": "git_object **::git_repository *::const git_tree_entry *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Convert a tree entry to the git_object it points to.

\n", "comments": "

You must call git_object_free() on the object when you are done with it.

\n", "group": "tree", "examples": { - "general.c": [ - "ex/HEAD/general.html#git_tree_entry_to_object-99" - ] + "general.c": ["ex/v1.9.1/general.html#git_tree_entry_to_object-106"] } }, "git_treebuilder_new": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 254, "lineto": 255, "args": [ @@ -24497,9 +22876,9 @@ }, "git_treebuilder_clear": { "type": "function", - "file": "tree.h", - "line": 262, - "lineto": 262, + "file": "git2/tree.h", + "line": 263, + "lineto": 263, "args": [ { "name": "bld", @@ -24510,18 +22889,18 @@ "argline": "git_treebuilder *bld", "sig": "git_treebuilder *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success; error code otherwise" }, - "description": "

Clear all the entires in the builder

\n", + "description": "

Clear all the entries in the builder

\n", "comments": "", "group": "treebuilder" }, "git_treebuilder_entrycount": { "type": "function", - "file": "tree.h", - "line": 270, - "lineto": 270, + "file": "git2/tree.h", + "line": 271, + "lineto": 271, "args": [ { "name": "bld", @@ -24532,7 +22911,7 @@ "argline": "git_treebuilder *bld", "sig": "git_treebuilder *", "return": { - "type": "unsigned int", + "type": "size_t", "comment": " the number of entries in the treebuilder" }, "description": "

Get the number of entries listed in a treebuilder

\n", @@ -24541,9 +22920,9 @@ }, "git_treebuilder_free": { "type": "function", - "file": "tree.h", - "line": 281, - "lineto": 281, + "file": "git2/tree.h", + "line": 282, + "lineto": 282, "args": [ { "name": "bld", @@ -24553,19 +22932,16 @@ ], "argline": "git_treebuilder *bld", "sig": "git_treebuilder *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a tree builder

\n", "comments": "

This will clear all the entries and free to builder. Failing to free the builder after you're done using it will result in a memory leak

\n", "group": "treebuilder" }, "git_treebuilder_get": { "type": "function", - "file": "tree.h", - "line": 293, - "lineto": 294, + "file": "git2/tree.h", + "line": 294, + "lineto": 295, "args": [ { "name": "bld", @@ -24590,9 +22966,9 @@ }, "git_treebuilder_insert": { "type": "function", - "file": "tree.h", - "line": 324, - "lineto": 329, + "file": "git2/tree.h", + "line": 325, + "lineto": 330, "args": [ { "name": "out", @@ -24622,19 +22998,16 @@ ], "argline": "const git_tree_entry **out, git_treebuilder *bld, const char *filename, const git_oid *id, git_filemode_t filemode", "sig": "const git_tree_entry **::git_treebuilder *::const char *::const git_oid *::git_filemode_t", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Add or update an entry to the builder

\n", "comments": "

Insert a new entry for filename in the builder with the given attributes.

\n\n

If an entry named filename already exists, its attributes will be updated with the given ones.

\n\n

The optional pointer out can be used to retrieve a pointer to the newly created/updated entry. Pass NULL if you do not need it. The pointer may not be valid past the next operation in this builder. Duplicate the entry if you want to keep it.

\n\n

By default the entry that you are inserting will be checked for validity; that it exists in the object database and is of the correct type. If you do not want this behavior, set the GIT_OPT_ENABLE_STRICT_OBJECT_CREATION library option to false.

\n", "group": "treebuilder" }, "git_treebuilder_remove": { "type": "function", - "file": "tree.h", - "line": 337, - "lineto": 338, + "file": "git2/tree.h", + "line": 339, + "lineto": 340, "args": [ { "name": "bld", @@ -24649,19 +23022,16 @@ ], "argline": "git_treebuilder *bld, const char *filename", "sig": "git_treebuilder *::const char *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Remove an entry from the builder by its filename

\n", "comments": "", "group": "treebuilder" }, "git_treebuilder_filter": { "type": "function", - "file": "tree.h", - "line": 361, - "lineto": 364, + "file": "git2/tree.h", + "line": 368, + "lineto": 371, "args": [ { "name": "bld", @@ -24682,8 +23052,8 @@ "argline": "git_treebuilder *bld, git_treebuilder_filter_cb filter, void *payload", "sig": "git_treebuilder *::git_treebuilder_filter_cb::void *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 on success, non-zero callback return value, or error code" }, "description": "

Selectively remove entries in the tree

\n", "comments": "

The filter callback will be called for each entry in the tree with a pointer to the entry and the provided payload; if the callback returns non-zero, the entry will be filtered (removed from the builder).

\n", @@ -24691,9 +23061,9 @@ }, "git_treebuilder_write": { "type": "function", - "file": "tree.h", - "line": 376, - "lineto": 377, + "file": "git2/tree.h", + "line": 383, + "lineto": 384, "args": [ { "name": "id", @@ -24708,49 +23078,14 @@ ], "argline": "git_oid *id, git_treebuilder *bld", "sig": "git_oid *::git_treebuilder *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Write the contents of the tree builder as a tree object

\n", "comments": "

The tree builder will be written to the given repo, and its identifying SHA1 hash will be stored in the id pointer.

\n", "group": "treebuilder" }, - "git_treebuilder_write_with_buffer": { - "type": "function", - "file": "tree.h", - "line": 390, - "lineto": 391, - "args": [ - { - "name": "oid", - "type": "git_oid *", - "comment": "Pointer to store the OID of the newly written tree" - }, - { - "name": "bld", - "type": "git_treebuilder *", - "comment": "Tree builder to write" - }, - { - "name": "tree", - "type": "git_buf *", - "comment": "Shared buffer for writing the tree. Will be grown as necessary." - } - ], - "argline": "git_oid *oid, git_treebuilder *bld, git_buf *tree", - "sig": "git_oid *::git_treebuilder *::git_buf *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, - "description": "

Write the contents of the tree builder as a tree object\n using a shared git_buf.

\n", - "comments": "", - "group": "treebuilder" - }, "git_tree_walk": { "type": "function", - "file": "tree.h", + "file": "git2/tree.h", "line": 420, "lineto": 424, "args": [ @@ -24777,19 +23112,16 @@ ], "argline": "const git_tree *tree, git_treewalk_mode mode, git_treewalk_cb callback, void *payload", "sig": "const git_tree *::git_treewalk_mode::git_treewalk_cb::void *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Traverse the entries in a tree and its subtrees in post or pre order.

\n", "comments": "

The entries will be traversed in the specified order, children subtrees will be automatically loaded as required, and the callback will be called once per entry with the current (relative) root for the entry and the entry data itself.

\n\n

If the callback returns a positive value, the passed entry will be skipped on the traversal (in pre mode). A negative value stops the walk.

\n", "group": "tree" }, "git_tree_dup": { "type": "function", - "file": "tree.h", - "line": 433, - "lineto": 433, + "file": "git2/tree.h", + "line": 434, + "lineto": 434, "args": [ { "name": "out", @@ -24804,25 +23136,18 @@ ], "argline": "git_tree **out, git_tree *source", "sig": "git_tree **::git_tree *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0" }, "description": "

Create an in-memory copy of a tree. The copy must be explicitly\n free'd or it will leak.

\n", "comments": "", "group": "tree" }, "git_tree_create_updated": { "type": "function", - "file": "tree.h", - "line": 479, - "lineto": 479, + "file": "git2/tree.h", + "line": 481, + "lineto": 481, "args": [ - { - "name": "out", - "type": "git_oid *", - "comment": "id of the new tree" - }, + { "name": "out", "type": "git_oid *", "comment": "id of the new tree" }, { "name": "repo", "type": "git_repository *", @@ -24846,19 +23171,16 @@ ], "argline": "git_oid *out, git_repository *repo, git_tree *baseline, size_t nupdates, const git_tree_update *updates", "sig": "git_oid *::git_repository *::git_tree *::size_t::const git_tree_update *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Create a tree based on another one with the specified modifications

\n", "comments": "

Given the baseline perform the changes described in the list of updates and create a new tree.

\n\n

This function is optimized for common file/directory addition, removal and replacement in trees. It is much more efficient than reading the tree into a git_index and modifying that, but in exchange it is not as flexible.

\n\n

Deleting and adding the same entry is undefined behaviour, changing a tree to a blob or viceversa is not supported.

\n", "group": "tree" }, "git_worktree_list": { "type": "function", - "file": "worktree.h", - "line": 34, - "lineto": 34, + "file": "git2/worktree.h", + "line": 35, + "lineto": 35, "args": [ { "name": "out", @@ -24873,19 +23195,16 @@ ], "argline": "git_strarray *out, git_repository *repo", "sig": "git_strarray *::git_repository *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

List names of linked working trees

\n", "comments": "

The returned list should be released with git_strarray_free when no longer needed.

\n", "group": "worktree" }, "git_worktree_lookup": { "type": "function", - "file": "worktree.h", - "line": 44, - "lineto": 44, + "file": "git2/worktree.h", + "line": 45, + "lineto": 45, "args": [ { "name": "out", @@ -24905,19 +23224,16 @@ ], "argline": "git_worktree **out, git_repository *repo, const char *name", "sig": "git_worktree **::git_repository *::const char *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Lookup a working tree by its name for a given repository

\n", "comments": "", "group": "worktree" }, "git_worktree_open_from_repository": { "type": "function", - "file": "worktree.h", - "line": 56, - "lineto": 56, + "file": "git2/worktree.h", + "line": 58, + "lineto": 58, "args": [ { "name": "out", @@ -24932,19 +23248,16 @@ ], "argline": "git_worktree **out, git_repository *repo", "sig": "git_worktree **::git_repository *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Open a worktree of a given repository

\n", "comments": "

If a repository is not the main tree but a worktree, this function will look up the worktree inside the parent repository and create a new git_worktree structure.

\n", "group": "worktree" }, "git_worktree_free": { "type": "function", - "file": "worktree.h", - "line": 63, - "lineto": 63, + "file": "git2/worktree.h", + "line": 65, + "lineto": 65, "args": [ { "name": "wt", @@ -24954,19 +23267,16 @@ ], "argline": "git_worktree *wt", "sig": "git_worktree *", - "return": { - "type": "void", - "comment": null - }, + "return": { "type": "void", "comment": null }, "description": "

Free a previously allocated worktree

\n", "comments": "", "group": "worktree" }, "git_worktree_validate": { "type": "function", - "file": "worktree.h", - "line": 75, - "lineto": 75, + "file": "git2/worktree.h", + "line": 77, + "lineto": 77, "args": [ { "name": "wt", @@ -24984,21 +23294,21 @@ "comments": "

A valid worktree requires both the git data structures inside the linked parent repository and the linked working copy to be present.

\n", "group": "worktree" }, - "git_worktree_add_init_options": { + "git_worktree_add_options_init": { "type": "function", - "file": "worktree.h", - "line": 95, - "lineto": 96, + "file": "git2/worktree.h", + "line": 116, + "lineto": 117, "args": [ { "name": "opts", "type": "git_worktree_add_options *", - "comment": "the struct to initialize" + "comment": "The `git_worktree_add_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Verison of struct; pass `GIT_WORKTREE_ADD_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_WORKTREE_ADD_OPTIONS_VERSION`." } ], "argline": "git_worktree_add_options *opts, unsigned int version", @@ -25007,15 +23317,15 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_worktree_add_options with default vaules.\n Equivalent to creating an instance with\n GIT_WORKTREE_ADD_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_worktree_add_options structure

\n", + "comments": "

Initializes a git_worktree_add_options with default values. Equivalent to creating an instance with GIT_WORKTREE_ADD_OPTIONS_INIT.

\n", "group": "worktree" }, "git_worktree_add": { "type": "function", - "file": "worktree.h", - "line": 112, - "lineto": 114, + "file": "git2/worktree.h", + "line": 133, + "lineto": 135, "args": [ { "name": "out", @@ -25045,19 +23355,16 @@ ], "argline": "git_worktree **out, git_repository *repo, const char *name, const char *path, const git_worktree_add_options *opts", "sig": "git_worktree **::git_repository *::const char *::const char *::const git_worktree_add_options *", - "return": { - "type": "int", - "comment": " 0 or an error code" - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Add a new working tree

\n", "comments": "

Add a new working tree for the repository, that is create the required data structures inside the repository and check out the current HEAD at path

\n", "group": "worktree" }, "git_worktree_lock": { "type": "function", - "file": "worktree.h", - "line": 126, - "lineto": 126, + "file": "git2/worktree.h", + "line": 147, + "lineto": 147, "args": [ { "name": "wt", @@ -25082,9 +23389,9 @@ }, "git_worktree_unlock": { "type": "function", - "file": "worktree.h", - "line": 135, - "lineto": 135, + "file": "git2/worktree.h", + "line": 156, + "lineto": 156, "args": [ { "name": "wt", @@ -25104,9 +23411,9 @@ }, "git_worktree_is_locked": { "type": "function", - "file": "worktree.h", - "line": 149, - "lineto": 149, + "file": "git2/worktree.h", + "line": 170, + "lineto": 170, "args": [ { "name": "reason", @@ -25129,21 +23436,65 @@ "comments": "

A worktree may be locked if the linked working tree is stored on a portable device which is not available.

\n", "group": "worktree" }, - "git_worktree_prune_init_options": { + "git_worktree_name": { "type": "function", - "file": "worktree.h", - "line": 182, - "lineto": 184, + "file": "git2/worktree.h", + "line": 179, + "lineto": 179, + "args": [ + { + "name": "wt", + "type": "const git_worktree *", + "comment": "Worktree to get the name for" + } + ], + "argline": "const git_worktree *wt", + "sig": "const git_worktree *", + "return": { + "type": "const char *", + "comment": " The worktree's name. The pointer returned is valid for the\n lifetime of the git_worktree" + }, + "description": "

Retrieve the name of the worktree

\n", + "comments": "", + "group": "worktree" + }, + "git_worktree_path": { + "type": "function", + "file": "git2/worktree.h", + "line": 188, + "lineto": 188, + "args": [ + { + "name": "wt", + "type": "const git_worktree *", + "comment": "Worktree to get the path for" + } + ], + "argline": "const git_worktree *wt", + "sig": "const git_worktree *", + "return": { + "type": "const char *", + "comment": " The worktree's filesystem path. The pointer returned\n is valid for the lifetime of the git_worktree." + }, + "description": "

Retrieve the filesystem path for the worktree

\n", + "comments": "", + "group": "worktree" + }, + "git_worktree_prune_options_init": { + "type": "function", + "file": "git2/worktree.h", + "line": 233, + "lineto": 235, "args": [ { "name": "opts", "type": "git_worktree_prune_options *", - "comment": "the struct to initialize" + "comment": "The `git_worktree_prune_options` struct to initialize." }, { "name": "version", "type": "unsigned int", - "comment": "Verison of struct; pass `GIT_WORKTREE_PRUNE_OPTIONS_VERSION`" + "comment": "The struct version; pass `GIT_WORKTREE_PRUNE_OPTIONS_VERSION`." } ], "argline": "git_worktree_prune_options *opts, unsigned int version", @@ -25152,179 +23503,286 @@ "type": "int", "comment": " Zero on success; -1 on failure." }, - "description": "

Initializes a git_worktree_prune_options with default vaules.\n Equivalent to creating an instance with\n GIT_WORKTREE_PRUNE_OPTIONS_INIT.

\n", - "comments": "", + "description": "

Initialize git_worktree_prune_options structure

\n", + "comments": "

Initializes a git_worktree_prune_options with default values. Equivalent to creating an instance with GIT_WORKTREE_PRUNE_OPTIONS_INIT.

\n", "group": "worktree" }, "git_worktree_is_prunable": { "type": "function", - "file": "worktree.h", - "line": 200, - "lineto": 201, + "file": "git2/worktree.h", + "line": 257, + "lineto": 258, "args": [ { "name": "wt", "type": "git_worktree *", - "comment": null + "comment": "Worktree to check." }, { "name": "opts", "type": "git_worktree_prune_options *", - "comment": null + "comment": "The prunable options." + } + ], + "argline": "git_worktree *wt, git_worktree_prune_options *opts", + "sig": "git_worktree *::git_worktree_prune_options *", + "return": { + "type": "int", + "comment": " 1 if the worktree is prunable, 0 otherwise, or an error code." + }, + "description": "

Is the worktree prunable with the given options?

\n", + "comments": "

A worktree is not prunable in the following scenarios:

\n\n\n\n

If the worktree is not valid and not locked or if the above flags have been passed in, this function will return a positive value. If the worktree is not prunable, an error message will be set (visible in giterr_last) with details about why.

\n", + "group": "worktree" + }, + "git_worktree_prune": { + "type": "function", + "file": "git2/worktree.h", + "line": 272, + "lineto": 273, + "args": [ + { + "name": "wt", + "type": "git_worktree *", + "comment": "Worktree to prune" + }, + { + "name": "opts", + "type": "git_worktree_prune_options *", + "comment": "Specifies which checks to override. See\n `git_worktree_is_prunable`. May be NULL" + } + ], + "argline": "git_worktree *wt, git_worktree_prune_options *opts", + "sig": "git_worktree *::git_worktree_prune_options *", + "return": { "type": "int", "comment": " 0 or an error code" }, + "description": "

Prune working tree

\n", + "comments": "

Prune the working tree, that is remove the git data structures on disk. The repository will only be pruned of git_worktree_is_prunable succeeds.

\n", + "group": "worktree" + } + }, + "callbacks": { + "git_apply_delta_cb": { + "type": "callback", + "file": "git2/apply.h", + "line": 41, + "lineto": 43, + "args": [ + { + "name": "delta", + "type": "const git_diff_delta *", + "comment": "The delta to be applied" + }, + { + "name": "payload", + "type": "void *", + "comment": "User-specified payload" + } + ], + "argline": "const git_diff_delta *delta, void *payload", + "sig": "const git_diff_delta *::void *", + "return": { + "type": "int", + "comment": " 0 if the delta is applied, \n<\n 0 if the apply process will be aborted\n\tor > 0 if the delta will not be applied." + }, + "description": "

When applying a patch, callback that will be made per delta (file).

\n", + "comments": "

When the callback: - returns < 0, the apply process will be aborted. - returns > 0, the delta will not be applied, but the apply process continues - returns 0, the delta is applied, and the apply process continues.

\n" + }, + "git_apply_hunk_cb": { + "type": "callback", + "file": "git2/apply.h", + "line": 59, + "lineto": 61, + "args": [ + { + "name": "hunk", + "type": "const git_diff_hunk *", + "comment": "The hunk to be applied" + }, + { + "name": "payload", + "type": "void *", + "comment": "User-specified payload" + } + ], + "argline": "const git_diff_hunk *hunk, void *payload", + "sig": "const git_diff_hunk *::void *", + "return": { + "type": "int", + "comment": " 0 if the hunk is applied, \n<\n 0 if the apply process will be aborted\n\tor > 0 if the hunk will not be applied." + }, + "description": "

When applying a patch, callback that will be made per hunk.

\n", + "comments": "

When the callback: - returns < 0, the apply process will be aborted. - returns > 0, the hunk will not be applied, but the apply process continues - returns 0, the hunk is applied, and the apply process continues.

\n" + }, + "git_attr_foreach_cb": { + "type": "callback", + "file": "git2/attr.h", + "line": 304, + "lineto": 304, + "args": [ + { + "name": "name", + "type": "const char *", + "comment": "The attribute name." + }, + { + "name": "value", + "type": "const char *", + "comment": "The attribute value. May be NULL if the attribute is explicitly\n set to UNSPECIFIED using the '!' sign." + }, + { + "name": "payload", + "type": "void *", + "comment": "A user-specified pointer." } ], - "argline": "git_worktree *wt, git_worktree_prune_options *opts", - "sig": "git_worktree *::git_worktree_prune_options *", + "argline": "const char *name, const char *value, void *payload", + "sig": "const char *::const char *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 to continue looping, non-zero to stop. This value will be returned\n from git_attr_foreach." }, - "description": "

Is the worktree prunable with the given options?

\n", - "comments": "

A worktree is not prunable in the following scenarios:

\n\n\n\n

If the worktree is not valid and not locked or if the above flags have been passed in, this function will return a positive value.

\n", - "group": "worktree" + "description": "

The callback used with git_attr_foreach.

\n", + "comments": "

This callback will be invoked only once per attribute name, even if there are multiple rules for a given file. The highest priority rule will be used.

\n" }, - "git_worktree_prune": { - "type": "function", - "file": "worktree.h", - "line": 215, - "lineto": 216, + "git_transport_certificate_check_cb": { + "type": "callback", + "file": "git2/cert.h", + "line": 72, + "lineto": 72, "args": [ { - "name": "wt", - "type": "git_worktree *", - "comment": "Worktree to prune" + "name": "cert", + "type": "git_cert *", + "comment": "The host certificate" }, { - "name": "opts", - "type": "git_worktree_prune_options *", - "comment": "Specifies which checks to override. See\n `git_worktree_is_prunable`. May be NULL" + "name": "valid", + "type": "int", + "comment": "Whether the libgit2 checks (OpenSSL or WinHTTP) think\n this certificate is valid" + }, + { + "name": "host", + "type": "const char *", + "comment": "Hostname of the host libgit2 connected to" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload provided by the caller" } ], - "argline": "git_worktree *wt, git_worktree_prune_options *opts", - "sig": "git_worktree *::git_worktree_prune_options *", + "argline": "git_cert *cert, int valid, const char *host, void *payload", + "sig": "git_cert *::int::const char *::void *", "return": { "type": "int", - "comment": " 0 or an error code" + "comment": " 0 to proceed with the connection, \n<\n 0 to fail the connection\n or > 0 to indicate that the callback refused to act and that\n the existing validity determination should be honored" }, - "description": "

Prune working tree

\n", - "comments": "

Prune the working tree, that is remove the git data structures on disk. The repository will only be pruned of git_worktree_is_prunable succeeds.

\n", - "group": "worktree" - } - }, - "callbacks": { + "description": "

Callback for the user's custom certificate checks.

\n", + "comments": "" + }, "git_checkout_notify_cb": { "type": "callback", - "file": "checkout.h", - "line": 223, - "lineto": 229, + "file": "git2/checkout.h", + "line": 275, + "lineto": 281, "args": [ { "name": "why", "type": "git_checkout_notify_t", - "comment": null + "comment": "the notification reason" }, { "name": "path", "type": "const char *", - "comment": null + "comment": "the path to the file being checked out" }, { "name": "baseline", "type": "const git_diff_file *", - "comment": null + "comment": "the baseline's diff file information" }, { "name": "target", "type": "const git_diff_file *", - "comment": null + "comment": "the checkout target diff file information" }, { "name": "workdir", "type": "const git_diff_file *", - "comment": null + "comment": "the working directory diff file information" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-supplied callback payload" } ], "argline": "git_checkout_notify_t why, const char *path, const git_diff_file *baseline, const git_diff_file *target, const git_diff_file *workdir, void *payload", "sig": "git_checkout_notify_t::const char *::const git_diff_file *::const git_diff_file *::const git_diff_file *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Checkout notification callback function

\n", + "return": { "type": "int", "comment": " 0 on success, or an error code" }, + "description": "

Checkout notification callback function.

\n", "comments": "" }, "git_checkout_progress_cb": { "type": "callback", - "file": "checkout.h", - "line": 232, - "lineto": 236, + "file": "git2/checkout.h", + "line": 291, + "lineto": 295, "args": [ { "name": "path", "type": "const char *", - "comment": null + "comment": "the path to the file being checked out" }, { "name": "completed_steps", "type": "size_t", - "comment": null + "comment": "number of checkout steps completed" }, { "name": "total_steps", "type": "size_t", - "comment": null + "comment": "number of total steps in the checkout process" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-supplied callback payload" } ], "argline": "const char *path, size_t completed_steps, size_t total_steps, void *payload", "sig": "const char *::size_t::size_t::void *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Checkout progress notification function

\n", + "return": { "type": "void", "comment": null }, + "description": "

Checkout progress notification function.

\n", "comments": "" }, "git_checkout_perfdata_cb": { "type": "callback", - "file": "checkout.h", - "line": 239, - "lineto": 241, + "file": "git2/checkout.h", + "line": 303, + "lineto": 305, "args": [ { "name": "perfdata", "type": "const git_checkout_perfdata *", - "comment": null + "comment": "the performance data for the checkout" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-supplied callback payload" } ], "argline": "const git_checkout_perfdata *perfdata, void *payload", "sig": "const git_checkout_perfdata *::void *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Checkout perfdata notification function

\n", + "return": { "type": "void", "comment": null }, + "description": "

Checkout performance data reporting function.

\n", "comments": "" }, "git_remote_create_cb": { "type": "callback", - "file": "clone.h", - "line": 69, - "lineto": 74, + "file": "git2/clone.h", + "line": 73, + "lineto": 78, "args": [ { "name": "out", @@ -25346,11 +23804,7 @@ "type": "const char *", "comment": "the remote's url" }, - { - "name": "payload", - "type": "void *", - "comment": "an opaque payload" - } + { "name": "payload", "type": "void *", "comment": "an opaque payload" } ], "argline": "git_remote **out, git_repository *repo, const char *name, const char *url, void *payload", "sig": "git_remote **::git_repository *::const char *::const char *::void *", @@ -25363,9 +23817,9 @@ }, "git_repository_create_cb": { "type": "callback", - "file": "clone.h", - "line": 90, - "lineto": 94, + "file": "git2/clone.h", + "line": 94, + "lineto": 98, "args": [ { "name": "out", @@ -25394,50 +23848,297 @@ "type": "int", "comment": " 0, or a negative value to indicate error" }, - "description": "

The signature of a function matchin git_repository_init, with an\n aditional void * as callback payload.

\n", + "description": "

The signature of a function matching git_repository_init, with an\n additional void * as callback payload.

\n", "comments": "

Callers of git_clone my provide a function matching this signature to override the repository creation and customization process during a clone operation.

\n" }, + "git_commit_create_cb": { + "type": "callback", + "file": "git2/commit.h", + "line": 643, + "lineto": 652, + "args": [ + { + "name": "out", + "type": "git_oid *", + "comment": "pointer that this callback will populate with the object\n id of the commit that is created" + }, + { + "name": "author", + "type": "const git_signature *", + "comment": "the author name and time of the commit" + }, + { + "name": "committer", + "type": "const git_signature *", + "comment": "the committer name and time of the commit" + }, + { + "name": "message_encoding", + "type": "const char *", + "comment": "the encoding of the given message, or NULL\n to assume UTF8" + }, + { + "name": "message", + "type": "const char *", + "comment": "the commit message" + }, + { + "name": "tree", + "type": "const git_tree *", + "comment": "the tree to be committed" + }, + { + "name": "parent_count", + "type": "size_t", + "comment": "the number of parents for this commit" + }, + { + "name": "parents", + "type": "const git_commit *[]", + "comment": "the commit parents" + }, + { + "name": "payload", + "type": "void *", + "comment": "the payload pointer in the rebase options" + } + ], + "argline": "git_oid *out, const git_signature *author, const git_signature *committer, const char *message_encoding, const char *message, const git_tree *tree, size_t parent_count, const git_commit *[] parents, void *payload", + "sig": "git_oid *::const git_signature *::const git_signature *::const char *::const char *::const git_tree *::size_t::const git_commit *[]::void *", + "return": { + "type": "int", + "comment": " 0 if this callback has created the commit and populated the out\n parameter, GIT_PASSTHROUGH if the callback has not created a\n commit and wants the calling function to create the commit as\n if no callback had been specified, any other value to stop\n and return a failure" + }, + "description": "

Commit creation callback: used when a function is going to create\n commits (for example, in git_rebase_commit) to allow callers to\n override the commit creation behavior. For example, users may\n wish to sign commits by providing this information to\n git_commit_create_buffer, signing that buffer, then calling\n git_commit_create_with_signature. The resultant commit id\n should be set in the out object id parameter.

\n", + "comments": "" + }, + "git_config_foreach_cb": { + "type": "callback", + "file": "git2/config.h", + "line": 140, + "lineto": 140, + "args": [ + { + "name": "entry", + "type": "const git_config_entry *", + "comment": "the entry currently being enumerated" + }, + { + "name": "payload", + "type": "void *", + "comment": "a user-specified pointer" + } + ], + "argline": "const git_config_entry *entry, void *payload", + "sig": "const git_config_entry *::void *", + "return": { + "type": "int", + "comment": " non-zero to terminate the iteration." + }, + "description": "

A config enumeration callback.

\n", + "comments": "" + }, + "git_credential_acquire_cb": { + "type": "callback", + "file": "git2/credential.h", + "line": 134, + "lineto": 139, + "args": [ + { + "name": "out", + "type": "git_credential **", + "comment": "The newly created credential object." + }, + { + "name": "url", + "type": "const char *", + "comment": "The resource for which we are demanding a credential." + }, + { + "name": "username_from_url", + "type": "const char *", + "comment": "The username that was embedded in a \"user\n@\nhost\"\n remote url, or NULL if not included." + }, + { + "name": "allowed_types", + "type": "unsigned int", + "comment": "A bitmask stating which credential types are OK to return." + }, + { + "name": "payload", + "type": "void *", + "comment": "The payload provided when specifying this callback." + } + ], + "argline": "git_credential **out, const char *url, const char *username_from_url, unsigned int allowed_types, void *payload", + "sig": "git_credential **::const char *::const char *::unsigned int::void *", + "return": { + "type": "int", + "comment": " 0 for success, \n<\n 0 to indicate an error, > 0 to indicate\n no credential was acquired" + }, + "description": "

Credential acquisition callback.

\n", + "comments": "

This callback is usually involved any time another system might need authentication. As such, you are expected to provide a valid git_credential object back, depending on allowed_types (a git_credential_t bitmask).

\n\n

Note that most authentication details are your responsibility - this callback will be called until the authentication succeeds, or you report an error. As such, it's easy to get in a loop if you fail to stop providing the same incorrect credentials.

\n" + }, + "git_credential_ssh_interactive_cb": { + "type": "callback", + "file": "git2/credential.h", + "line": 259, + "lineto": 265, + "args": [ + { "name": "name", "type": "const char *", "comment": "the name" }, + { + "name": "name_len", + "type": "int", + "comment": "the length of the name" + }, + { + "name": "instruction", + "type": "const char *", + "comment": "the authentication instruction" + }, + { + "name": "instruction_len", + "type": "int", + "comment": "the length of the instruction" + }, + { + "name": "num_prompts", + "type": "int", + "comment": "the number of prompts" + }, + { + "name": "prompts", + "type": "const LIBSSH2_USERAUTH_KBDINT_PROMPT *", + "comment": "the prompts" + }, + { + "name": "responses", + "type": "LIBSSH2_USERAUTH_KBDINT_RESPONSE *", + "comment": "the responses" + }, + { "name": "abstract", "type": "void **", "comment": "the abstract" } + ], + "argline": "const char *name, int name_len, const char *instruction, int instruction_len, int num_prompts, const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, void **abstract", + "sig": "const char *::int::const char *::int::int::const LIBSSH2_USERAUTH_KBDINT_PROMPT *::LIBSSH2_USERAUTH_KBDINT_RESPONSE *::void **", + "return": { "type": "void", "comment": null }, + "description": "

Callback for interactive SSH credentials.

\n", + "comments": "" + }, + "git_credential_sign_cb": { + "type": "callback", + "file": "git2/credential.h", + "line": 308, + "lineto": 312, + "args": [ + { + "name": "session", + "type": "LIBSSH2_SESSION *", + "comment": "the libssh2 session" + }, + { + "name": "sig", + "type": "unsigned char **", + "comment": "the signature" + }, + { + "name": "sig_len", + "type": "size_t *", + "comment": "the length of the signature" + }, + { + "name": "data", + "type": "const unsigned char *", + "comment": "the data" + }, + { + "name": "data_len", + "type": "size_t", + "comment": "the length of the data" + }, + { "name": "abstract", "type": "void **", "comment": "the abstract" } + ], + "argline": "LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, const unsigned char *data, size_t data_len, void **abstract", + "sig": "LIBSSH2_SESSION *::unsigned char **::size_t *::const unsigned char *::size_t::void **", + "return": { + "type": "int", + "comment": " 0 for success, \n<\n 0 to indicate an error, > 0 to indicate\n no credential was acquired" + }, + "description": "

Callback for credential signing.

\n", + "comments": "" + }, + "git_commit_signing_cb": { + "type": "callback", + "file": "git2/deprecated.h", + "line": 285, + "lineto": 289, + "args": [ + { "name": "signature", "type": "git_buf *", "comment": null }, + { "name": "signature_field", "type": "git_buf *", "comment": null }, + { "name": "commit_content", "type": "const char *", "comment": null }, + { "name": "payload", "type": "void *", "comment": null } + ], + "argline": "git_buf *signature, git_buf *signature_field, const char *commit_content, void *payload", + "sig": "git_buf *::git_buf *::const char *::void *", + "return": { "type": "int", "comment": null }, + "description": "

Provide a commit signature during commit creation.

\n", + "comments": "

Callers should instead define a git_commit_create_cb that generates a commit buffer using git_commit_create_buffer, sign that buffer and call git_commit_create_with_signature.

\n" + }, + "git_headlist_cb": { + "type": "callback", + "file": "git2/deprecated.h", + "line": 967, + "lineto": 967, + "args": [ + { "name": "rhead", "type": "git_remote_head *", "comment": null }, + { "name": "payload", "type": "void *", "comment": null } + ], + "argline": "git_remote_head *rhead, void *payload", + "sig": "git_remote_head *::void *", + "return": { "type": "int", "comment": null }, + "description": "

Callback for listing the remote heads

\n", + "comments": "" + }, "git_diff_notify_cb": { "type": "callback", - "file": "diff.h", - "line": 359, - "lineto": 363, + "file": "git2/diff.h", + "line": 352, + "lineto": 356, "args": [ { "name": "diff_so_far", "type": "const git_diff *", - "comment": null + "comment": "the diff structure as it currently exists" }, { "name": "delta_to_add", "type": "const git_diff_delta *", - "comment": null + "comment": "the delta that is to be added" }, { "name": "matched_pathspec", "type": "const char *", - "comment": null + "comment": "the pathspec" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified callback payload" } ], "argline": "const git_diff *diff_so_far, const git_diff_delta *delta_to_add, const char *matched_pathspec, void *payload", "sig": "const git_diff *::const git_diff_delta *::const char *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, 1 to skip this delta, or an error code" }, "description": "

Diff notification callback function.

\n", - "comments": "

The callback will be called for each file, just before the git_delta_t gets inserted into the diff.

\n\n

When the callback: - returns < 0, the diff process will be aborted. - returns > 0, the delta will not be inserted into the diff, but the diff process continues. - returns 0, the delta is inserted into the diff, and the diff process continues.

\n" + "comments": "

The callback will be called for each file, just before the git_diff_delta gets inserted into the diff.

\n\n

When the callback: - returns < 0, the diff process will be aborted. - returns > 0, the delta will not be inserted into the diff, but the diff process continues. - returns 0, the delta is inserted into the diff, and the diff process continues.

\n" }, "git_diff_progress_cb": { "type": "callback", - "file": "diff.h", - "line": 375, - "lineto": 379, + "file": "git2/diff.h", + "line": 369, + "lineto": 373, "args": [ { "name": "diff_so_far", @@ -25457,23 +24158,20 @@ { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified callback payload" } ], "argline": "const git_diff *diff_so_far, const char *old_path, const char *new_path, void *payload", "sig": "const git_diff *::const char *::const char *::void *", - "return": { - "type": "int", - "comment": " Non-zero to abort the diff." - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

Diff progress callback.

\n", "comments": "

Called before each file comparison.

\n" }, "git_diff_file_cb": { "type": "callback", - "file": "diff.h", - "line": 458, - "lineto": 461, + "file": "git2/diff.h", + "line": 504, + "lineto": 507, "args": [ { "name": "delta", @@ -25493,928 +24191,743 @@ ], "argline": "const git_diff_delta *delta, float progress, void *payload", "sig": "const git_diff_delta *::float::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

When iterating over a diff, callback that will be made per file.

\n", "comments": "" }, "git_diff_binary_cb": { "type": "callback", - "file": "diff.h", - "line": 515, - "lineto": 518, + "file": "git2/diff.h", + "line": 576, + "lineto": 579, "args": [ { "name": "delta", "type": "const git_diff_delta *", - "comment": null + "comment": "the delta" }, { "name": "binary", "type": "const git_diff_binary *", - "comment": null + "comment": "the binary content" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified callback payload" } ], "argline": "const git_diff_delta *delta, const git_diff_binary *binary, void *payload", "sig": "const git_diff_delta *::const git_diff_binary *::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

When iterating over a diff, callback that will be made for\n binary content within the diff.

\n", "comments": "" }, "git_diff_hunk_cb": { "type": "callback", - "file": "diff.h", - "line": 535, - "lineto": 538, + "file": "git2/diff.h", + "line": 607, + "lineto": 610, "args": [ { "name": "delta", "type": "const git_diff_delta *", - "comment": null + "comment": "the delta" }, { "name": "hunk", "type": "const git_diff_hunk *", - "comment": null + "comment": "the hunk" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified callback payload" } ], "argline": "const git_diff_delta *delta, const git_diff_hunk *hunk, void *payload", "sig": "const git_diff_delta *::const git_diff_hunk *::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

When iterating over a diff, callback that will be made per hunk.

\n", "comments": "" }, "git_diff_line_cb": { "type": "callback", - "file": "diff.h", - "line": 588, - "lineto": 592, + "file": "git2/diff.h", + "line": 674, + "lineto": 678, "args": [ { "name": "delta", "type": "const git_diff_delta *", - "comment": null + "comment": "the delta that contains the line" }, { "name": "hunk", "type": "const git_diff_hunk *", - "comment": null + "comment": "the hunk that contains the line" }, { "name": "line", "type": "const git_diff_line *", - "comment": null + "comment": "the line in the diff" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified callback payload" } ], "argline": "const git_diff_delta *delta, const git_diff_hunk *hunk, const git_diff_line *line, void *payload", "sig": "const git_diff_delta *::const git_diff_hunk *::const git_diff_line *::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 or an error code" }, "description": "

When iterating over a diff, callback that will be made per text diff\n line. In this context, the provided range will be NULL.

\n", "comments": "

When printing a diff, callback that will be made to output each line of text. This uses some extra GIT_DIFF_LINE_... constants for output of lines of file and hunk headers.

\n" }, "git_index_matched_path_cb": { "type": "callback", - "file": "index.h", - "line": 146, - "lineto": 147, + "file": "git2/index.h", + "line": 158, + "lineto": 159, "args": [ - { - "name": "path", - "type": "const char *", - "comment": null - }, + { "name": "path", "type": "const char *", "comment": "the path" }, { "name": "matched_pathspec", "type": "const char *", - "comment": null + "comment": "the given pathspec" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the user-specified payload" } ], "argline": "const char *path, const char *matched_pathspec, void *payload", "sig": "const char *::const char *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 to continue with the index operation, positive number to skip this file for the index operation, negative number on failure" }, - "description": "

Callback for APIs that add/remove/update files matching pathspec

\n", + "description": "

Callback for APIs that add/remove/update files matching pathspec

\n", "comments": "" }, - "git_headlist_cb": { + "git_indexer_progress_cb": { "type": "callback", - "file": "net.h", - "line": 55, - "lineto": 55, + "file": "git2/indexer.h", + "line": 68, + "lineto": 68, "args": [ { - "name": "rhead", - "type": "git_remote_head *", - "comment": null + "name": "stats", + "type": "const git_indexer_progress *", + "comment": "Structure containing information about the state of the transfer" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "Payload provided by caller" } ], - "argline": "git_remote_head *rhead, void *payload", - "sig": "git_remote_head *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Callback for listing the remote heads

\n", + "argline": "const git_indexer_progress *stats, void *payload", + "sig": "const git_indexer_progress *::void *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Type for progress callbacks during indexing. Return a value less\n than zero to cancel the indexing or download.

\n", "comments": "" }, "git_note_foreach_cb": { "type": "callback", - "file": "notes.h", + "file": "git2/notes.h", "line": 29, - "lineto": 30, + "lineto": 32, "args": [ { "name": "blob_id", "type": "const git_oid *", - "comment": null + "comment": "object id of the blob containing the message" }, { "name": "annotated_object_id", "type": "const git_oid *", - "comment": null + "comment": "the id of the object being annotated" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "user-specified data to the foreach function" } ], "argline": "const git_oid *blob_id, const git_oid *annotated_object_id, void *payload", "sig": "const git_oid *::const git_oid *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, or a negative number on failure" }, "description": "

Callback for git_note_foreach.

\n", - "comments": "

Receives: - blob_id: Oid of the blob containing the message - annotated_object_id: Oid of the git object being annotated - payload: Payload data passed to git_note_foreach

\n" + "comments": "" }, "git_odb_foreach_cb": { "type": "callback", - "file": "odb.h", - "line": 27, - "lineto": 27, + "file": "git2/odb.h", + "line": 43, + "lineto": 43, "args": [ { "name": "id", "type": "const git_oid *", - "comment": null + "comment": "an id of an object in the object database" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "the payload from the initial call to git_odb_foreach" } ], "argline": "const git_oid *id, void *payload", "sig": "const git_oid *::void *", - "return": { - "type": "int", - "comment": null - }, + "return": { "type": "int", "comment": " 0 on success, or an error code" }, "description": "

Function type for callbacks from git_odb_foreach.

\n", "comments": "" }, - "git_packbuilder_progress": { - "type": "callback", - "file": "pack.h", - "line": 210, - "lineto": 214, - "args": [ - { - "name": "stage", - "type": "int", - "comment": null - }, - { - "name": "current", - "type": "uint32_t", - "comment": null - }, - { - "name": "total", - "type": "uint32_t", - "comment": null - }, - { - "name": "payload", - "type": "void *", - "comment": null - } - ], - "argline": "int stage, uint32_t current, uint32_t total, void *payload", - "sig": "int::uint32_t::uint32_t::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Packbuilder progress notification function

\n", - "comments": "" - }, - "git_push_transfer_progress": { + "git_packbuilder_foreach_cb": { "type": "callback", - "file": "remote.h", - "line": 351, - "lineto": 355, + "file": "git2/pack.h", + "line": 208, + "lineto": 208, "args": [ { - "name": "current", - "type": "unsigned int", - "comment": null - }, - { - "name": "total", - "type": "unsigned int", - "comment": null - }, - { - "name": "bytes", - "type": "size_t", - "comment": null - }, - { - "name": "payload", + "name": "buf", "type": "void *", - "comment": null - } - ], - "argline": "unsigned int current, unsigned int total, size_t bytes, void *payload", - "sig": "unsigned int::unsigned int::size_t::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Push network progress notification function

\n", - "comments": "" - }, - "git_push_negotiation": { - "type": "callback", - "file": "remote.h", - "line": 386, - "lineto": 386, - "args": [ - { - "name": "updates", - "type": "const git_push_update **", - "comment": "an array containing the updates which will be sent\n as commands to the destination." + "comment": "A pointer to the object's data" }, { - "name": "len", + "name": "size", "type": "size_t", - "comment": "number of elements in `updates`" - }, - { - "name": "payload", - "type": "void *", - "comment": "Payload provided by the caller" - } - ], - "argline": "const git_push_update **updates, size_t len, void *payload", - "sig": "const git_push_update **::size_t::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Callback used to inform of upcoming updates.

\n", - "comments": "" - }, - "git_push_update_reference_cb": { - "type": "callback", - "file": "remote.h", - "line": 400, - "lineto": 400, - "args": [ - { - "name": "refname", - "type": "const char *", - "comment": "refname specifying to the remote ref" - }, - { - "name": "status", - "type": "const char *", - "comment": "status message sent from the remote" - }, - { - "name": "data", - "type": "void *", - "comment": "data provided by the caller" - } - ], - "argline": "const char *refname, const char *status, void *data", - "sig": "const char *::const char *::void *", - "return": { - "type": "int", - "comment": " 0 on success, otherwise an error" - }, - "description": "

Callback used to inform of the update status from the remote.

\n", - "comments": "

Called for each updated reference on push. If status is not NULL, the update was rejected by the remote server and status contains the reason given.

\n" - }, - "git_revwalk_hide_cb": { - "type": "callback", - "file": "revwalk.h", - "line": 277, - "lineto": 279, - "args": [ - { - "name": "commit_id", - "type": "const git_oid *", - "comment": "oid of Commit" - }, - { - "name": "payload", - "type": "void *", - "comment": "User-specified pointer to data to be passed as data payload" - } - ], - "argline": "const git_oid *commit_id, void *payload", - "sig": "const git_oid *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

This is a callback function that user can provide to hide a\n commit and its parents. If the callback function returns non-zero value,\n then this commit and its parents will be hidden.

\n", - "comments": "" - }, - "git_stash_apply_progress_cb": { - "type": "callback", - "file": "stash.h", - "line": 113, - "lineto": 115, - "args": [ - { - "name": "progress", - "type": "git_stash_apply_progress_t", - "comment": null + "comment": "The size of the underlying object" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "Payload passed to git_packbuilder_foreach" } ], - "argline": "git_stash_apply_progress_t progress, void *payload", - "sig": "git_stash_apply_progress_t::void *", + "argline": "void *buf, size_t size, void *payload", + "sig": "void *::size_t::void *", "return": { "type": "int", - "comment": null + "comment": " non-zero to terminate the iteration" }, - "description": "

Stash application progress notification function.\n Return 0 to continue processing, or a negative value to\n abort the stash application.

\n", + "description": "

Callback used to iterate over packed objects

\n", "comments": "" }, - "git_stash_cb": { + "git_packbuilder_progress": { "type": "callback", - "file": "stash.h", - "line": 198, - "lineto": 202, + "file": "git2/pack.h", + "line": 245, + "lineto": 249, "args": [ { - "name": "index", - "type": "size_t", - "comment": "The position within the stash list. 0 points to the\n most recent stashed state." + "name": "stage", + "type": "int", + "comment": "the stage of the packbuilder" }, { - "name": "message", - "type": "const char *", - "comment": "The stash message." + "name": "current", + "type": "uint32_t", + "comment": "the current object" }, { - "name": "stash_id", - "type": "const int *", - "comment": "The commit oid of the stashed state." + "name": "total", + "type": "uint32_t", + "comment": "the total number of objects" }, { "name": "payload", "type": "void *", - "comment": "Extra parameter to callback function." + "comment": "the callback payload" } ], - "argline": "size_t index, const char *message, const int *stash_id, void *payload", - "sig": "size_t::const char *::const int *::void *", - "return": { - "type": "int", - "comment": " 0 to continue iterating or non-zero to stop." - }, - "description": "

This is a callback function you can provide to iterate over all the\n stashed states that will be invoked per entry.

\n", + "argline": "int stage, uint32_t current, uint32_t total, void *payload", + "sig": "int::uint32_t::uint32_t::void *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Packbuilder progress notification function.

\n", "comments": "" }, - "git_status_cb": { + "git_reference_foreach_cb": { "type": "callback", - "file": "status.h", - "line": 61, - "lineto": 62, + "file": "git2/refs.h", + "line": 439, + "lineto": 439, "args": [ { - "name": "path", - "type": "const char *", - "comment": null - }, - { - "name": "status_flags", - "type": "unsigned int", - "comment": null + "name": "reference", + "type": "git_reference *", + "comment": "The reference object" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "Payload passed to git_reference_foreach" } ], - "argline": "const char *path, unsigned int status_flags, void *payload", - "sig": "const char *::unsigned int::void *", + "argline": "git_reference *reference, void *payload", + "sig": "git_reference *::void *", "return": { "type": "int", - "comment": null + "comment": " non-zero to terminate the iteration" }, - "description": "

Function pointer to receive status on individual files

\n", - "comments": "

path is the relative path to the file from the root of the repository.

\n\n

status_flags is a combination of git_status_t values that apply.

\n\n

payload is the value you passed to the foreach function as payload.

\n" + "description": "

Callback used to iterate over references

\n", + "comments": "" }, - "git_submodule_cb": { + "git_reference_foreach_name_cb": { "type": "callback", - "file": "submodule.h", - "line": 118, - "lineto": 119, + "file": "git2/refs.h", + "line": 450, + "lineto": 450, "args": [ - { - "name": "sm", - "type": "git_submodule *", - "comment": "git_submodule currently being visited" - }, { "name": "name", "type": "const char *", - "comment": "name of the submodule" + "comment": "The reference name" }, { "name": "payload", "type": "void *", - "comment": "value you passed to the foreach function as payload" + "comment": "Payload passed to git_reference_foreach_name" } ], - "argline": "git_submodule *sm, const char *name, void *payload", - "sig": "git_submodule *::const char *::void *", + "argline": "const char *name, void *payload", + "sig": "const char *::void *", "return": { "type": "int", - "comment": " 0 on success or error code" + "comment": " non-zero to terminate the iteration" }, - "description": "

Function pointer to receive each submodule

\n", + "description": "

Callback used to iterate over reference names

\n", "comments": "" }, - "git_filter_init_fn": { + "git_push_transfer_progress_cb": { "type": "callback", - "file": "sys/filter.h", - "line": 141, - "lineto": 141, + "file": "git2/remote.h", + "line": 481, + "lineto": 485, "args": [ { - "name": "self", - "type": "git_filter *", - "comment": null + "name": "current", + "type": "unsigned int", + "comment": "The number of objects pushed so far" + }, + { + "name": "total", + "type": "unsigned int", + "comment": "The total number of objects to push" + }, + { + "name": "bytes", + "type": "size_t", + "comment": "The number of bytes pushed" + }, + { + "name": "payload", + "type": "void *", + "comment": "The user-specified payload callback" } ], - "argline": "git_filter *self", - "sig": "git_filter *", + "argline": "unsigned int current, unsigned int total, size_t bytes, void *payload", + "sig": "unsigned int::unsigned int::size_t::void *", "return": { "type": "int", - "comment": null + "comment": " 0 or an error code to stop the transfer" }, - "description": "

Initialize callback on filter

\n", - "comments": "

Specified as filter.initialize, this is an optional callback invoked before a filter is first used. It will be called once at most.

\n\n

If non-NULL, the filter's initialize callback will be invoked right before the first use of the filter, so you can defer expensive initialization operations (in case libgit2 is being used in a way that doesn't need the filter).

\n" + "description": "

Push network progress notification callback.

\n", + "comments": "" }, - "git_filter_shutdown_fn": { + "git_push_negotiation": { "type": "callback", - "file": "sys/filter.h", - "line": 153, - "lineto": 153, + "file": "git2/remote.h", + "line": 518, + "lineto": 521, "args": [ { - "name": "self", - "type": "git_filter *", - "comment": null + "name": "updates", + "type": "const git_push_update **", + "comment": "an array containing the updates which will be sent\n as commands to the destination." + }, + { + "name": "len", + "type": "size_t", + "comment": "number of elements in `updates`" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload provided by the caller" } ], - "argline": "git_filter *self", - "sig": "git_filter *", + "argline": "const git_push_update **updates, size_t len, void *payload", + "sig": "const git_push_update **::size_t::void *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " 0 or an error code to stop the push" }, - "description": "

Shutdown callback on filter

\n", - "comments": "

Specified as filter.shutdown, this is an optional callback invoked when the filter is unregistered or when libgit2 is shutting down. It will be called once at most and should release resources as needed. This may be called even if the initialize callback was not made.

\n\n

Typically this function will free the git_filter object itself.

\n" + "description": "

Callback used to inform of upcoming updates.

\n", + "comments": "" }, - "git_filter_check_fn": { + "git_push_update_reference_cb": { "type": "callback", - "file": "sys/filter.h", - "line": 175, - "lineto": 179, + "file": "git2/remote.h", + "line": 535, + "lineto": 535, "args": [ { - "name": "self", - "type": "git_filter *", - "comment": null - }, - { - "name": "payload", - "type": "void **", - "comment": null + "name": "refname", + "type": "const char *", + "comment": "refname specifying to the remote ref" }, { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "status", + "type": "const char *", + "comment": "status message sent from the remote" }, { - "name": "attr_values", - "type": "const char **", - "comment": null + "name": "data", + "type": "void *", + "comment": "data provided by the caller" } ], - "argline": "git_filter *self, void **payload, const git_filter_source *src, const char **attr_values", - "sig": "git_filter *::void **::const git_filter_source *::const char **", + "argline": "const char *refname, const char *status, void *data", + "sig": "const char *::const char *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, otherwise an error" }, - "description": "

Callback to decide if a given source needs this filter

\n", - "comments": "

Specified as filter.check, this is an optional callback that checks if filtering is needed for a given source.

\n\n

It should return 0 if the filter should be applied (i.e. success), GIT_PASSTHROUGH if the filter should not be applied, or an error code to fail out of the filter processing pipeline and return to the caller.

\n\n

The attr_values will be set to the values of any attributes given in the filter definition. See git_filter below for more detail.

\n\n

The payload will be a pointer to a reference payload for the filter. This will start as NULL, but check can assign to this pointer for later use by the apply callback. Note that the value should be heap allocated (not stack), so that it doesn't go away before the apply callback can use it. If a filter allocates and assigns a value to the payload, it will need a cleanup callback to free the payload.

\n" + "description": "

Callback used to inform of the update status from the remote.

\n", + "comments": "

Called for each updated reference on push. If status is not NULL, the update was rejected by the remote server and status contains the reason given.

\n" }, - "git_filter_apply_fn": { + "git_url_resolve_cb": { "type": "callback", - "file": "sys/filter.h", - "line": 193, - "lineto": 198, + "file": "git2/remote.h", + "line": 551, + "lineto": 551, "args": [ { - "name": "self", - "type": "git_filter *", - "comment": null - }, - { - "name": "payload", - "type": "void **", - "comment": null + "name": "url_resolved", + "type": "git_buf *", + "comment": "The buffer to write the resolved URL to" }, { - "name": "to", - "type": "git_buf *", - "comment": null + "name": "url", + "type": "const char *", + "comment": "The URL to resolve" }, { - "name": "from", - "type": "const git_buf *", - "comment": null + "name": "direction", + "type": "int", + "comment": "GIT_DIRECTION_FETCH or GIT_DIRECTION_PUSH" }, { - "name": "src", - "type": "const git_filter_source *", - "comment": null + "name": "payload", + "type": "void *", + "comment": "Payload provided by the caller" } ], - "argline": "git_filter *self, void **payload, git_buf *to, const git_buf *from, const git_filter_source *src", - "sig": "git_filter *::void **::git_buf *::const git_buf *::const git_filter_source *", + "argline": "git_buf *url_resolved, const char *url, int direction, void *payload", + "sig": "git_buf *::const char *::int::void *", "return": { "type": "int", - "comment": null + "comment": " 0 on success, GIT_PASSTHROUGH or an error\n " }, - "description": "

Callback to actually perform the data filtering

\n", - "comments": "

Specified as filter.apply, this is the callback that actually filters data. If it successfully writes the output, it should return 0. Like check, it can return GIT_PASSTHROUGH to indicate that the filter doesn't want to run. Other error codes will stop filter processing and return to the caller.

\n\n

The payload value will refer to any payload that was set by the check callback. It may be read from or written to as needed.

\n" + "description": "

Callback to resolve URLs before connecting to remote

\n", + "comments": "

If you return GIT_PASSTHROUGH, you don't need to write anything to url_resolved.

\n" }, - "git_filter_cleanup_fn": { + "git_remote_ready_cb": { "type": "callback", - "file": "sys/filter.h", - "line": 215, - "lineto": 217, + "file": "git2/remote.h", + "line": 564, + "lineto": 564, "args": [ { - "name": "self", - "type": "git_filter *", - "comment": null + "name": "remote", + "type": "git_remote *", + "comment": "The remote to be connected" + }, + { + "name": "direction", + "type": "int", + "comment": "GIT_DIRECTION_FETCH or GIT_DIRECTION_PUSH" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "Payload provided by the caller" } ], - "argline": "git_filter *self, void *payload", - "sig": "git_filter *::void *", - "return": { - "type": "void", - "comment": null - }, - "description": "

Callback to clean up after filtering has been applied

\n", - "comments": "

Specified as filter.cleanup, this is an optional callback invoked after the filter has been applied. If the check or apply callbacks allocated a payload to keep per-source filter state, use this callback to free that payload and release resources as required.

\n" + "argline": "git_remote *remote, int direction, void *payload", + "sig": "git_remote *::int::void *", + "return": { "type": "int", "comment": " 0 on success, or an error" }, + "description": "

Callback invoked immediately before we attempt to connect to the\n given url. Callers may change the URL before the connection by\n calling git_remote_set_instance_url in the callback.

\n", + "comments": "" }, - "git_merge_driver_init_fn": { + "git_repository_fetchhead_foreach_cb": { "type": "callback", - "file": "sys/merge.h", - "line": 71, - "lineto": 71, + "file": "git2/repository.h", + "line": 746, + "lineto": 750, "args": [ { - "name": "self", - "type": "int *", - "comment": null + "name": "ref_name", + "type": "const char *", + "comment": "The reference name" + }, + { + "name": "remote_url", + "type": "const char *", + "comment": "The remote URL" + }, + { + "name": "oid", + "type": "const git_oid *", + "comment": "The reference target OID" + }, + { + "name": "is_merge", + "type": "unsigned int", + "comment": "Was the reference the result of a merge" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload passed to git_repository_fetchhead_foreach" } ], - "argline": "int *self", - "sig": "int *", + "argline": "const char *ref_name, const char *remote_url, const git_oid *oid, unsigned int is_merge, void *payload", + "sig": "const char *::const char *::const git_oid *::unsigned int::void *", "return": { "type": "int", - "comment": null + "comment": " non-zero to terminate the iteration" }, - "description": "

Initialize callback on merge driver

\n", - "comments": "

Specified as driver.initialize, this is an optional callback invoked before a merge driver is first used. It will be called once at most per library lifetime.

\n\n

If non-NULL, the merge driver's initialize callback will be invoked right before the first use of the driver, so you can defer expensive initialization operations (in case libgit2 is being used in a way that doesn't need the merge driver).

\n" + "description": "

Callback used to iterate over each FETCH_HEAD entry

\n", + "comments": "" }, - "git_merge_driver_shutdown_fn": { + "git_repository_mergehead_foreach_cb": { "type": "callback", - "file": "sys/merge.h", - "line": 83, - "lineto": 83, + "file": "git2/repository.h", + "line": 777, + "lineto": 778, "args": [ { - "name": "self", - "type": "int *", - "comment": null + "name": "oid", + "type": "const git_oid *", + "comment": "The merge OID" + }, + { + "name": "payload", + "type": "void *", + "comment": "Payload passed to git_repository_mergehead_foreach" } ], - "argline": "int *self", - "sig": "int *", + "argline": "const git_oid *oid, void *payload", + "sig": "const git_oid *::void *", "return": { - "type": "void", - "comment": null + "type": "int", + "comment": " non-zero to terminate the iteration" }, - "description": "

Shutdown callback on merge driver

\n", - "comments": "

Specified as driver.shutdown, this is an optional callback invoked when the merge driver is unregistered or when libgit2 is shutting down. It will be called once at most and should release resources as needed. This may be called even if the initialize callback was not made.

\n\n

Typically this function will free the git_merge_driver object itself.

\n" + "description": "

Callback used to iterate over each MERGE_HEAD entry

\n", + "comments": "" }, - "git_merge_driver_apply_fn": { + "git_revwalk_hide_cb": { "type": "callback", - "file": "sys/merge.h", - "line": 103, - "lineto": 109, + "file": "git2/revwalk.h", + "line": 283, + "lineto": 285, "args": [ { - "name": "self", - "type": "int *", - "comment": null - }, - { - "name": "path_out", - "type": "const char **", - "comment": null - }, - { - "name": "mode_out", - "type": "int *", - "comment": null - }, - { - "name": "merged_out", - "type": "int *", - "comment": null - }, - { - "name": "filter_name", - "type": "const char *", - "comment": null + "name": "commit_id", + "type": "const git_oid *", + "comment": "oid of Commit" }, { - "name": "src", - "type": "const git_merge_driver_source *", - "comment": null + "name": "payload", + "type": "void *", + "comment": "User-specified pointer to data to be passed as data payload" } ], - "argline": "int *self, const char **path_out, int *mode_out, int *merged_out, const char *filter_name, const git_merge_driver_source *src", - "sig": "int *::const char **::int *::int *::const char *::const git_merge_driver_source *", + "argline": "const git_oid *commit_id, void *payload", + "sig": "const git_oid *::void *", "return": { "type": "int", - "comment": null + "comment": " non-zero to hide the commmit and it parent." }, - "description": "

Callback to perform the merge.

\n", - "comments": "

Specified as driver.apply, this is the callback that actually does the merge. If it can successfully perform a merge, it should populate path_out with a pointer to the filename to accept, mode_out with the resultant mode, and merged_out with the buffer of the merged file and then return 0. If the driver returns GIT_PASSTHROUGH, then the default merge driver should instead be run. It can also return GIT_EMERGECONFLICT if the driver is not able to produce a merge result, and the file will remain conflicted. Any other errors will fail and return to the caller.

\n\n

The filter_name contains the name of the filter that was invoked, as specified by the file's attributes.

\n\n

The src contains the data about the file to be merged.

\n" + "description": "

This is a callback function that user can provide to hide a\n commit and its parents. If the callback function returns non-zero value,\n then this commit and its parents will be hidden.

\n", + "comments": "" }, - "git_trace_callback": { + "git_stash_apply_progress_cb": { "type": "callback", - "file": "trace.h", - "line": 52, - "lineto": 52, + "file": "git2/stash.h", + "line": 181, + "lineto": 183, "args": [ { - "name": "level", - "type": "git_trace_level_t", - "comment": null + "name": "progress", + "type": "git_stash_apply_progress_t", + "comment": "the progress information" }, { - "name": "msg", - "type": "const char *", - "comment": null + "name": "payload", + "type": "void *", + "comment": "the user-specified payload to the apply function" } ], - "argline": "git_trace_level_t level, const char *msg", - "sig": "git_trace_level_t::const char *", - "return": { - "type": "void", - "comment": null - }, - "description": "

An instance for a tracing function

\n", + "argline": "git_stash_apply_progress_t progress, void *payload", + "sig": "git_stash_apply_progress_t::void *", + "return": { "type": "int", "comment": " 0 on success, -1 on error" }, + "description": "

Stash application progress notification function.\n Return 0 to continue processing, or a negative value to\n abort the stash application.

\n", "comments": "" }, - "git_transport_cb": { + "git_stash_cb": { "type": "callback", - "file": "transport.h", - "line": 24, - "lineto": 24, + "file": "git2/stash.h", + "line": 268, + "lineto": 272, "args": [ { - "name": "out", - "type": "git_transport **", - "comment": null + "name": "index", + "type": "size_t", + "comment": "The position within the stash list. 0 points to the\n most recent stashed state." }, { - "name": "owner", - "type": "git_remote *", - "comment": null + "name": "message", + "type": "const char *", + "comment": "The stash message." }, { - "name": "param", + "name": "stash_id", + "type": "const git_oid *", + "comment": "The commit oid of the stashed state." + }, + { + "name": "payload", "type": "void *", - "comment": null + "comment": "Extra parameter to callback function." } ], - "argline": "git_transport **out, git_remote *owner, void *param", - "sig": "git_transport **::git_remote *::void *", + "argline": "size_t index, const char *message, const git_oid *stash_id, void *payload", + "sig": "size_t::const char *::const git_oid *::void *", "return": { "type": "int", - "comment": null + "comment": " 0 to continue iterating or non-zero to stop." }, - "description": "

Signature of a function which creates a transport

\n", + "description": "

This is a callback function you can provide to iterate over all the\n stashed states that will be invoked per entry.

\n", "comments": "" }, - "git_cred_acquire_cb": { + "git_status_cb": { "type": "callback", - "file": "transport.h", - "line": 333, - "lineto": 338, - "args": [ - { - "name": "cred", - "type": "git_cred **", - "comment": "The newly created credential object." - }, - { - "name": "url", - "type": "const char *", - "comment": "The resource for which we are demanding a credential." - }, + "file": "git2/status.h", + "line": 62, + "lineto": 63, + "args": [ { - "name": "username_from_url", + "name": "path", "type": "const char *", - "comment": "The username that was embedded in a \"user\n@\nhost\"\n remote url, or NULL if not included." + "comment": "is the path to the file" }, { - "name": "allowed_types", + "name": "status_flags", "type": "unsigned int", - "comment": "A bitmask stating which cred types are OK to return." + "comment": "the `git_status_t` values for file's status" }, { "name": "payload", "type": "void *", - "comment": "The payload provided when specifying this callback." + "comment": "the user-specified payload to the foreach function" } ], - "argline": "git_cred **cred, const char *url, const char *username_from_url, unsigned int allowed_types, void *payload", - "sig": "git_cred **::const char *::const char *::unsigned int::void *", + "argline": "const char *path, unsigned int status_flags, void *payload", + "sig": "const char *::unsigned int::void *", "return": { "type": "int", - "comment": " 0 for success, \n<\n 0 to indicate an error, > 0 to indicate\n no credential was acquired" + "comment": " 0 on success, or a negative number on failure" }, - "description": "

Signature of a function which acquires a credential object.

\n", + "description": "

Function pointer to receive status on individual files

\n", "comments": "" }, - "git_treebuilder_filter_cb": { + "git_submodule_cb": { "type": "callback", - "file": "tree.h", - "line": 347, - "lineto": 348, + "file": "git2/submodule.h", + "line": 125, + "lineto": 126, "args": [ { - "name": "entry", - "type": "const git_tree_entry *", - "comment": null + "name": "sm", + "type": "git_submodule *", + "comment": "git_submodule currently being visited" + }, + { + "name": "name", + "type": "const char *", + "comment": "name of the submodule" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "value you passed to the foreach function as payload" } ], - "argline": "const git_tree_entry *entry, void *payload", - "sig": "const git_tree_entry *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Callback for git_treebuilder_filter

\n", - "comments": "

The return value is treated as a boolean, with zero indicating that the entry should be left alone and any non-zero value meaning that the entry should be removed from the treebuilder list (i.e. filtered out).

\n" + "argline": "git_submodule *sm, const char *name, void *payload", + "sig": "git_submodule *::const char *::void *", + "return": { "type": "int", "comment": " 0 on success or error code" }, + "description": "

Function pointer to receive each submodule

\n", + "comments": "" }, - "git_treewalk_cb": { + "git_tag_foreach_cb": { "type": "callback", - "file": "tree.h", - "line": 394, - "lineto": 395, + "file": "git2/tag.h", + "line": 330, + "lineto": 330, "args": [ - { - "name": "root", - "type": "const char *", - "comment": null - }, - { - "name": "entry", - "type": "const git_tree_entry *", - "comment": null - }, + { "name": "name", "type": "const char *", "comment": "The tag name" }, + { "name": "oid", "type": "git_oid *", "comment": "The tag's OID" }, { "name": "payload", "type": "void *", - "comment": null + "comment": "Payload passed to git_tag_foreach" } ], - "argline": "const char *root, const git_tree_entry *entry, void *payload", - "sig": "const char *::const git_tree_entry *::void *", + "argline": "const char *name, git_oid *oid, void *payload", + "sig": "const char *::git_oid *::void *", "return": { "type": "int", - "comment": null + "comment": " non-zero to terminate the iteration" }, - "description": "

Callback for the tree traversal method

\n", + "description": "

Callback used to iterate over tag names

\n", "comments": "" }, - "git_transfer_progress_cb": { + "git_trace_cb": { "type": "callback", - "file": "types.h", - "line": 274, - "lineto": 274, + "file": "git2/trace.h", + "line": 55, + "lineto": 57, "args": [ { - "name": "stats", - "type": "const git_transfer_progress *", - "comment": "Structure containing information about the state of the transfer" + "name": "level", + "type": "git_trace_level_t", + "comment": "the trace level" }, { - "name": "payload", - "type": "void *", - "comment": "Payload provided by caller" + "name": "msg", + "type": "const char *", + "comment": "the trace message" } ], - "argline": "const git_transfer_progress *stats, void *payload", - "sig": "const git_transfer_progress *::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Type for progress callbacks during indexing. Return a value less than zero\n to cancel the transfer.

\n", + "argline": "git_trace_level_t level, const char *msg", + "sig": "git_trace_level_t::const char *", + "return": { "type": "void", "comment": null }, + "description": "

An instance for a tracing function

\n", "comments": "" }, "git_transport_message_cb": { "type": "callback", - "file": "types.h", - "line": 284, - "lineto": 284, + "file": "git2/transport.h", + "line": 35, + "lineto": 35, "args": [ { "name": "str", @@ -26434,47 +24947,93 @@ ], "argline": "const char *str, int len, void *payload", "sig": "const char *::int::void *", - "return": { - "type": "int", - "comment": null - }, - "description": "

Type for messages delivered by the transport. Return a negative value\n to cancel the network operation.

\n", - "comments": "" + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Callback for messages received by the transport.

\n", + "comments": "

Return a negative value to cancel the network operation.

\n" }, - "git_transport_certificate_check_cb": { + "git_transport_cb": { "type": "callback", - "file": "types.h", - "line": 334, - "lineto": 334, + "file": "git2/transport.h", + "line": 45, + "lineto": 45, "args": [ { - "name": "cert", - "type": "git_cert *", - "comment": "The host certificate" + "name": "out", + "type": "git_transport **", + "comment": "the transport generate" }, { - "name": "valid", - "type": "int", - "comment": "Whether the libgit2 checks (OpenSSL or WinHTTP) think\n this certificate is valid" + "name": "owner", + "type": "git_remote *", + "comment": "the owner for the transport" }, { - "name": "host", + "name": "param", + "type": "void *", + "comment": "the param to the transport creation" + } + ], + "argline": "git_transport **out, git_remote *owner, void *param", + "sig": "git_transport **::git_remote *::void *", + "return": { "type": "int", "comment": " 0 on success or an error code" }, + "description": "

Signature of a function which creates a transport.

\n", + "comments": "" + }, + "git_treebuilder_filter_cb": { + "type": "callback", + "file": "git2/tree.h", + "line": 353, + "lineto": 354, + "args": [ + { + "name": "entry", + "type": "const git_tree_entry *", + "comment": "the tree entry for the callback to examine" + }, + { + "name": "payload", + "type": "void *", + "comment": "the payload from the caller" + } + ], + "argline": "const git_tree_entry *entry, void *payload", + "sig": "const git_tree_entry *::void *", + "return": { + "type": "int", + "comment": " 0 to do nothing, non-zero to remove the entry" + }, + "description": "

Callback for git_treebuilder_filter

\n", + "comments": "

The return value is treated as a boolean, with zero indicating that the entry should be left alone and any non-zero value meaning that the entry should be removed from the treebuilder list (i.e. filtered out).

\n" + }, + "git_treewalk_cb": { + "type": "callback", + "file": "git2/tree.h", + "line": 394, + "lineto": 395, + "args": [ + { + "name": "root", "type": "const char *", - "comment": "Hostname of the host libgit2 connected to" + "comment": "the current (relative) root to the entry" + }, + { + "name": "entry", + "type": "const git_tree_entry *", + "comment": "the tree entry" }, { "name": "payload", "type": "void *", - "comment": "Payload provided by the caller" + "comment": "the caller-provided callback payload" } ], - "argline": "git_cert *cert, int valid, const char *host, void *payload", - "sig": "git_cert *::int::const char *::void *", + "argline": "const char *root, const git_tree_entry *entry, void *payload", + "sig": "const char *::const git_tree_entry *::void *", "return": { "type": "int", - "comment": null + "comment": " a positive value to skip the entry, a negative value to stop the walk" }, - "description": "

Callback for the user's custom certificate checks.

\n", + "description": "

Callback for the tree traversal method.

\n", "comments": "" } }, @@ -26486,79 +25045,269 @@ "decl": "git_annotated_commit", "type": "struct", "value": "git_annotated_commit", - "file": "types.h", - "line": 182, - "lineto": 182, + "file": "git2/types.h", + "line": 214, + "lineto": 214, + "tdef": "typedef", + "description": " Annotated commits are commits with additional metadata about how the\n commit was resolved, which can be used for maintaining the user's\n \"intent\" through commands like merge and rebase.", + "comments": "

For example, if a user wants to conceptually "merge HEAD", then the commit portion of an annotated commit will point to the HEAD commit, but the annotation will denote the ref HEAD. This allows git to perform the internal bookkeeping so that the system knows both the content of what is being merged but also how the content was looked up so that it can be recorded in the reflog appropriately.

\n", + "used": { + "returns": [], + "needs": [ + "git_annotated_commit_free", + "git_annotated_commit_from_fetchhead", + "git_annotated_commit_from_ref", + "git_annotated_commit_from_revspec", + "git_annotated_commit_id", + "git_annotated_commit_lookup", + "git_annotated_commit_ref", + "git_branch_create_from_annotated", + "git_merge", + "git_merge_analysis", + "git_merge_analysis_for_ref", + "git_rebase_init", + "git_repository_set_head_detached_from_annotated", + "git_reset_from_annotated" + ] + } + } + ], + [ + "git_apply_flags_t", + { + "decl": ["GIT_APPLY_CHECK"], + "type": "enum", + "file": "git2/apply.h", + "line": 72, + "lineto": 78, + "block": "GIT_APPLY_CHECK", + "tdef": "typedef", + "description": " Flags controlling the behavior of `git_apply`.", + "comments": "

When the callback: - returns < 0, the apply process will be aborted. - returns > 0, the hunk will not be applied, but the apply process continues - returns 0, the hunk is applied, and the apply process continues.

\n", + "fields": [ + { + "type": "int", + "name": "GIT_APPLY_CHECK", + "comments": "

Don't actually make changes, just test that the patch applies.\n This is the equivalent of git apply --check.

\n", + "value": 1 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_apply_location_t", + { + "decl": [ + "GIT_APPLY_LOCATION_WORKDIR", + "GIT_APPLY_LOCATION_INDEX", + "GIT_APPLY_LOCATION_BOTH" + ], + "type": "enum", + "file": "git2/apply.h", + "line": 148, + "lineto": 166, + "block": "GIT_APPLY_LOCATION_WORKDIR\nGIT_APPLY_LOCATION_INDEX\nGIT_APPLY_LOCATION_BOTH", + "tdef": "typedef", + "description": " Possible application locations for git_apply ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_APPLY_LOCATION_WORKDIR", + "comments": "

Apply the patch to the workdir, leaving the index untouched.\n This is the equivalent of git apply with no location argument.

\n", + "value": 0 + }, + { + "type": "int", + "name": "GIT_APPLY_LOCATION_INDEX", + "comments": "

Apply the patch to the index, leaving the working directory\n untouched. This is the equivalent of git apply --cached.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_APPLY_LOCATION_BOTH", + "comments": "

Apply the patch to both the working directory and the index.\n This is the equivalent of git apply --index.

\n", + "value": 2 + } + ], + "used": { "returns": [], "needs": ["git_apply"] } + } + ], + [ + "git_apply_options", + { + "decl": [ + "unsigned int version", + "git_apply_delta_cb delta_cb", + "git_apply_hunk_cb hunk_cb", + "void * payload", + "unsigned int flags" + ], + "type": "struct", + "value": "git_apply_options", + "file": "git2/apply.h", + "line": 95, + "lineto": 109, + "block": "unsigned int version\ngit_apply_delta_cb delta_cb\ngit_apply_hunk_cb hunk_cb\nvoid * payload\nunsigned int flags", + "tdef": "typedef", + "description": " Apply options structure.", + "comments": "

When the callback: - returns < 0, the apply process will be aborted. - returns > 0, the hunk will not be applied, but the apply process continues - returns 0, the hunk is applied, and the apply process continues.

\n\n

Initialize with GIT_APPLY_OPTIONS_INIT. Alternatively, you can use git_apply_options_init.

\n", + "fields": [ + { + "type": "unsigned int", + "name": "version", + "comments": " The version " + }, + { + "type": "git_apply_delta_cb", + "name": "delta_cb", + "comments": " When applying a patch, callback that will be made per delta (file). " + }, + { + "type": "git_apply_hunk_cb", + "name": "hunk_cb", + "comments": " When applying a patch, callback that will be made per hunk. " + }, + { + "type": "void *", + "name": "payload", + "comments": " Payload passed to both `delta_cb` \n&\n `hunk_cb`. " + }, + { + "type": "unsigned int", + "name": "flags", + "comments": " Bitmask of `git_apply_flags_t` " + } + ], + "used": { + "returns": [], + "needs": ["git_apply", "git_apply_options_init", "git_apply_to_tree"] + } + } + ], + [ + "git_attr_options", + { + "decl": [ + "unsigned int version", + "unsigned int flags", + "git_oid * commit_id", + "git_oid attr_commit_id" + ], + "type": "struct", + "value": "git_attr_options", + "file": "git2/attr.h", + "line": 154, + "lineto": 171, + "block": "unsigned int version\nunsigned int flags\ngit_oid * commit_id\ngit_oid attr_commit_id", "tdef": "typedef", - "description": " Annotated commits, the input to merge and rebase. ", + "description": " An options structure for querying attributes.", "comments": "", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "unsigned int", + "name": "flags", + "comments": " A combination of GIT_ATTR_CHECK flags " + }, + { "type": "git_oid *", "name": "commit_id", "comments": "" }, + { + "type": "git_oid", + "name": "attr_commit_id", + "comments": " The commit to load attributes from, when\n `GIT_ATTR_CHECK_INCLUDE_COMMIT` is specified." 
+ } + ], "used": { "returns": [], "needs": [ - "git_annotated_commit_free", - "git_annotated_commit_from_fetchhead", - "git_annotated_commit_from_ref", - "git_annotated_commit_from_revspec", - "git_annotated_commit_id", - "git_annotated_commit_lookup", - "git_branch_create_from_annotated", - "git_merge", - "git_merge_analysis", - "git_rebase_init", - "git_repository_set_head_detached_from_annotated", - "git_reset_from_annotated" + "git_attr_foreach_ext", + "git_attr_get_ext", + "git_attr_get_many_ext" ] } } ], [ - "git_attr_t", + "git_attr_value_t", { "decl": [ - "GIT_ATTR_UNSPECIFIED_T", - "GIT_ATTR_TRUE_T", - "GIT_ATTR_FALSE_T", - "GIT_ATTR_VALUE_T" + "GIT_ATTR_VALUE_UNSPECIFIED", + "GIT_ATTR_VALUE_TRUE", + "GIT_ATTR_VALUE_FALSE", + "GIT_ATTR_VALUE_STRING" ], "type": "enum", - "file": "attr.h", - "line": 82, - "lineto": 87, - "block": "GIT_ATTR_UNSPECIFIED_T\nGIT_ATTR_TRUE_T\nGIT_ATTR_FALSE_T\nGIT_ATTR_VALUE_T", + "file": "git2/attr.h", + "line": 86, + "lineto": 91, + "block": "GIT_ATTR_VALUE_UNSPECIFIED\nGIT_ATTR_VALUE_TRUE\nGIT_ATTR_VALUE_FALSE\nGIT_ATTR_VALUE_STRING", "tdef": "typedef", "description": " Possible states for an attribute", "comments": "", "fields": [ { "type": "int", - "name": "GIT_ATTR_UNSPECIFIED_T", + "name": "GIT_ATTR_VALUE_UNSPECIFIED", "comments": "

The attribute has been left unspecified

\n", "value": 0 }, { "type": "int", - "name": "GIT_ATTR_TRUE_T", + "name": "GIT_ATTR_VALUE_TRUE", "comments": "

The attribute has been set

\n", "value": 1 }, { "type": "int", - "name": "GIT_ATTR_FALSE_T", + "name": "GIT_ATTR_VALUE_FALSE", "comments": "

The attribute has been unset

\n", "value": 2 }, { "type": "int", - "name": "GIT_ATTR_VALUE_T", + "name": "GIT_ATTR_VALUE_STRING", "comments": "

This attribute has a value

\n", "value": 3 } ], + "used": { "returns": ["git_attr_value"], "needs": [] } + } + ], + [ + "git_blame", + { + "decl": "git_blame", + "type": "struct", + "value": "git_blame", + "file": "git2/blame.h", + "line": 236, + "lineto": 236, + "tdef": "typedef", + "description": " Opaque structure to hold blame results ", + "comments": "", "used": { "returns": [ - "git_attr_value" + "git_blame_get_hunk_byindex", + "git_blame_get_hunk_byline", + "git_blame_hunk_byindex", + "git_blame_hunk_byline", + "git_blame_line_byindex" ], - "needs": [] + "needs": [ + "git_blame_buffer", + "git_blame_free", + "git_blame_get_hunk_byindex", + "git_blame_get_hunk_byline", + "git_blame_get_hunk_count", + "git_blame_hunk_byindex", + "git_blame_hunk_byline", + "git_blame_hunkcount", + "git_blame_init_options", + "git_blame_line_byindex", + "git_blame_linecount", + "git_blame_options_init" + ] } } ], @@ -26571,13 +25320,15 @@ "GIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES", "GIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES", "GIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES", - "GIT_BLAME_FIRST_PARENT" + "GIT_BLAME_FIRST_PARENT", + "GIT_BLAME_USE_MAILMAP", + "GIT_BLAME_IGNORE_WHITESPACE" ], "type": "enum", - "file": "blame.h", - "line": 26, - "lineto": 46, - "block": "GIT_BLAME_NORMAL\nGIT_BLAME_TRACK_COPIES_SAME_FILE\nGIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES\nGIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES\nGIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES\nGIT_BLAME_FIRST_PARENT", + "file": "git2/blame.h", + "line": 31, + "lineto": 82, + "block": "GIT_BLAME_NORMAL\nGIT_BLAME_TRACK_COPIES_SAME_FILE\nGIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES\nGIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES\nGIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES\nGIT_BLAME_FIRST_PARENT\nGIT_BLAME_USE_MAILMAP\nGIT_BLAME_IGNORE_WHITESPACE", "tdef": "typedef", "description": " Flags for indicating option behavior for git_blame APIs.", "comments": "", @@ -26591,117 +25342,67 @@ { "type": "int", "name": "GIT_BLAME_TRACK_COPIES_SAME_FILE", - "comments": "

Track lines that have moved within a file (like git blame -M).\n NOT IMPLEMENTED.

\n", + "comments": "

Track lines that have moved within a file (like git blame -M).

\n\n

This is not yet implemented and reserved for future use.

\n", "value": 1 }, { "type": "int", "name": "GIT_BLAME_TRACK_COPIES_SAME_COMMIT_MOVES", - "comments": "

Track lines that have moved across files in the same commit (like git blame -C).\n NOT IMPLEMENTED.

\n", + "comments": "

Track lines that have moved across files in the same commit\n (like git blame -C).

\n\n

This is not yet implemented and reserved for future use.

\n", "value": 2 }, { "type": "int", "name": "GIT_BLAME_TRACK_COPIES_SAME_COMMIT_COPIES", - "comments": "

Track lines that have been copied from another file that exists in the\n same commit (like git blame -CC). Implies SAME_FILE.\n NOT IMPLEMENTED.

\n", + "comments": "

Track lines that have been copied from another file that exists\n in the same commit (like git blame -CC). Implies SAME_FILE.

\n\n

This is not yet implemented and reserved for future use.

\n", "value": 4 }, { "type": "int", "name": "GIT_BLAME_TRACK_COPIES_ANY_COMMIT_COPIES", - "comments": "

Track lines that have been copied from another file that exists in any\n commit (like git blame -CCC). Implies SAME_COMMIT_COPIES.\n NOT IMPLEMENTED.

\n", + "comments": "

Track lines that have been copied from another file that exists in\n any commit (like git blame -CCC). Implies SAME_COMMIT_COPIES.

\n\n

This is not yet implemented and reserved for future use.

\n", "value": 8 }, { "type": "int", "name": "GIT_BLAME_FIRST_PARENT", - "comments": "

Restrict the search of commits to those reachable following only the\n first parents.

\n", + "comments": "

Restrict the search of commits to those reachable following only\n the first parents.

\n", "value": 16 + }, + { + "type": "int", + "name": "GIT_BLAME_USE_MAILMAP", + "comments": "

Use mailmap file to map author and committer names and email\n addresses to canonical real names and email addresses. The\n mailmap will be read from the working directory, or HEAD in a\n bare repository.

\n", + "value": 32 + }, + { + "type": "int", + "name": "GIT_BLAME_IGNORE_WHITESPACE", + "comments": "

Ignore whitespace differences

\n", + "value": 64 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_blame_hunk", + "git_blame_line", { - "decl": [ - "size_t lines_in_hunk", - "git_oid final_commit_id", - "size_t final_start_line_number", - "git_signature * final_signature", - "git_oid orig_commit_id", - "const char * orig_path", - "size_t orig_start_line_number", - "git_signature * orig_signature", - "char boundary" - ], + "decl": ["const char * ptr", "size_t len"], "type": "struct", - "value": "git_blame_hunk", - "file": "blame.h", - "line": 115, - "lineto": 128, - "block": "size_t lines_in_hunk\ngit_oid final_commit_id\nsize_t final_start_line_number\ngit_signature * final_signature\ngit_oid orig_commit_id\nconst char * orig_path\nsize_t orig_start_line_number\ngit_signature * orig_signature\nchar boundary", + "value": "git_blame_line", + "file": "git2/blame.h", + "line": 230, + "lineto": 233, + "block": "const char * ptr\nsize_t len", "tdef": "typedef", - "description": " Structure that represents a blame hunk.", - "comments": "\n", + "description": " Structure that represents a line in a blamed file.", + "comments": "", "fields": [ - { - "type": "size_t", - "name": "lines_in_hunk", - "comments": "" - }, - { - "type": "git_oid", - "name": "final_commit_id", - "comments": "" - }, - { - "type": "size_t", - "name": "final_start_line_number", - "comments": "" - }, - { - "type": "git_signature *", - "name": "final_signature", - "comments": "" - }, - { - "type": "git_oid", - "name": "orig_commit_id", - "comments": "" - }, - { - "type": "const char *", - "name": "orig_path", - "comments": "" - }, - { - "type": "size_t", - "name": "orig_start_line_number", - "comments": "" - }, - { - "type": "git_signature *", - "name": "orig_signature", - "comments": "" - }, - { - "type": "char", - "name": "boundary", - "comments": "" - } + { "type": "const char *", "name": "ptr", "comments": "" }, + { "type": "size_t", "name": "len", "comments": "" } ], - 
"used": { - "returns": [ - "git_blame_get_hunk_byindex", - "git_blame_get_hunk_byline" - ], - "needs": [] - } + "used": { "returns": ["git_blame_line_byindex"], "needs": [] } } ], [ @@ -26709,7 +25410,7 @@ { "decl": [ "unsigned int version", - "uint32_t flags", + "unsigned int flags", "uint16_t min_match_characters", "git_oid newest_commit", "git_oid oldest_commit", @@ -26718,56 +25419,49 @@ ], "type": "struct", "value": "git_blame_options", - "file": "blame.h", - "line": 70, - "lineto": 79, - "block": "unsigned int version\nuint32_t flags\nuint16_t min_match_characters\ngit_oid newest_commit\ngit_oid oldest_commit\nsize_t min_line\nsize_t max_line", + "file": "git2/blame.h", + "line": 91, + "lineto": 128, + "block": "unsigned int version\nunsigned int flags\nuint16_t min_match_characters\ngit_oid newest_commit\ngit_oid oldest_commit\nsize_t min_line\nsize_t max_line", "tdef": "typedef", "description": " Blame options structure", - "comments": "

Use zeros to indicate default settings. It's easiest to use the GIT_BLAME_OPTIONS_INIT macro: git_blame_options opts = GIT_BLAME_OPTIONS_INIT;

\n\n\n", + "comments": "

Initialize with GIT_BLAME_OPTIONS_INIT. Alternatively, you can use git_blame_options_init.

\n", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "uint32_t", "name": "flags", - "comments": "" + "comments": " A combination of `git_blame_flag_t` " }, { "type": "uint16_t", "name": "min_match_characters", - "comments": "" + "comments": " The lower bound on the number of alphanumeric characters that\n must be detected as moving/copying within a file for it to\n associate those lines with the parent commit. The default value\n is 20.\n\n This value only takes effect if any of the `GIT_BLAME_TRACK_COPIES_*`\n flags are specified." }, { "type": "git_oid", "name": "newest_commit", - "comments": "" + "comments": " The id of the newest commit to consider. The default is HEAD. " }, { "type": "git_oid", "name": "oldest_commit", - "comments": "" + "comments": " The id of the oldest commit to consider.\n The default is the first commit encountered with a NULL parent." }, { "type": "size_t", "name": "min_line", - "comments": "" + "comments": " The first line in the file to blame.\n The default is 1 (line numbers start with 1)." }, { "type": "size_t", "name": "max_line", - "comments": "" + "comments": " The last line in the file to blame.\n The default is the last line of the file." } ], "used": { "returns": [], - "needs": [ - "git_blame_file", - "git_blame_init_options" - ] + "needs": ["git_blame_init_options", "git_blame_options_init"] } } ], @@ -26777,9 +25471,9 @@ "decl": "git_blob", "type": "struct", "value": "git_blob", - "file": "types.h", - "line": 120, - "lineto": 120, + "file": "git2/types.h", + "line": 138, + "lineto": 138, "tdef": "typedef", "description": " In-memory representation of a blob object. 
", "comments": "", @@ -26787,6 +25481,8 @@ "returns": [], "needs": [ "git_blob_dup", + "git_blob_filter", + "git_blob_filter_options_init", "git_blob_filtered_content", "git_blob_free", "git_blob_id", @@ -26800,6 +25496,7 @@ "git_diff_blobs", "git_filter_list_apply_to_blob", "git_filter_list_load", + "git_filter_list_load_ext", "git_filter_list_stream_blob", "git_patch_from_blob_and_buffer", "git_patch_from_blobs" @@ -26807,15 +25504,107 @@ } } ], + [ + "git_blob_filter_flag_t", + { + "decl": [ + "GIT_BLOB_FILTER_CHECK_FOR_BINARY", + "GIT_BLOB_FILTER_NO_SYSTEM_ATTRIBUTES", + "GIT_BLOB_FILTER_ATTRIBUTES_FROM_HEAD", + "GIT_BLOB_FILTER_ATTRIBUTES_FROM_COMMIT" + ], + "type": "enum", + "file": "git2/blob.h", + "line": 111, + "lineto": 132, + "block": "GIT_BLOB_FILTER_CHECK_FOR_BINARY\nGIT_BLOB_FILTER_NO_SYSTEM_ATTRIBUTES\nGIT_BLOB_FILTER_ATTRIBUTES_FROM_HEAD\nGIT_BLOB_FILTER_ATTRIBUTES_FROM_COMMIT", + "tdef": "typedef", + "description": " Flags to control the functionality of `git_blob_filter`.", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_BLOB_FILTER_CHECK_FOR_BINARY", + "comments": "

When set, filters will not be applied to binary files.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_BLOB_FILTER_NO_SYSTEM_ATTRIBUTES", + "comments": "

When set, filters will not load configuration from the\n system-wide gitattributes in /etc (or system equivalent).

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_BLOB_FILTER_ATTRIBUTES_FROM_HEAD", + "comments": "

When set, filters will be loaded from a .gitattributes file\n in the HEAD commit.

\n", + "value": 4 + }, + { + "type": "int", + "name": "GIT_BLOB_FILTER_ATTRIBUTES_FROM_COMMIT", + "comments": "

When set, filters will be loaded from a .gitattributes file\n in the specified commit.

\n", + "value": 8 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_blob_filter_options", + { + "decl": [ + "int version", + "uint32_t flags", + "git_oid * commit_id", + "git_oid attr_commit_id" + ], + "type": "struct", + "value": "git_blob_filter_options", + "file": "git2/blob.h", + "line": 144, + "lineto": 176, + "block": "int version\nuint32_t flags\ngit_oid * commit_id\ngit_oid attr_commit_id", + "tdef": "typedef", + "description": " The options used when applying filter options to a file.", + "comments": "

Initialize with GIT_BLOB_FILTER_OPTIONS_INIT. Alternatively, you can use git_blob_filter_options_init.

\n\n

[version] GIT_BLOB_FILTER_OPTIONS_VERSION [init_macro] GIT_BLOB_FILTER_OPTIONS_INIT [init_function] git_blob_filter_options_init

\n", + "fields": [ + { + "type": "int", + "name": "version", + "comments": " Version number of the options structure. " + }, + { + "type": "uint32_t", + "name": "flags", + "comments": " Flags to control the filtering process, see `git_blob_filter_flag_t` above.\n\n \n\n[flags] git_blob_filter_flag_t" + }, + { + "type": "git_oid *", + "name": "commit_id", + "comments": " This value is unused and reserved for API compatibility.\n\n " + }, + { + "type": "git_oid", + "name": "attr_commit_id", + "comments": " The commit to load attributes from, when\n `GIT_BLOB_FILTER_ATTRIBUTES_FROM_COMMIT` is specified." + } + ], + "used": { + "returns": [], + "needs": ["git_blob_filter", "git_blob_filter_options_init"] + } + } + ], [ "git_branch_iterator", { "decl": "git_branch_iterator", "type": "struct", "value": "git_branch_iterator", - "file": "branch.h", - "line": 88, - "lineto": 88, + "file": "git2/branch.h", + "line": 97, + "lineto": 97, "tdef": "typedef", "description": " Iterator type for branches ", "comments": "", @@ -26832,15 +25621,11 @@ [ "git_branch_t", { - "decl": [ - "GIT_BRANCH_LOCAL", - "GIT_BRANCH_REMOTE", - "GIT_BRANCH_ALL" - ], + "decl": ["GIT_BRANCH_LOCAL", "GIT_BRANCH_REMOTE", "GIT_BRANCH_ALL"], "type": "enum", - "file": "types.h", - "line": 202, - "lineto": 206, + "file": "git2/types.h", + "line": 231, + "lineto": 235, "block": "GIT_BRANCH_LOCAL\nGIT_BRANCH_REMOTE\nGIT_BRANCH_ALL", "tdef": "typedef", "description": " Basic type of any Git branch. ", @@ -26878,42 +25663,44 @@ [ "git_buf", { - "decl": [ - "char * ptr", - "size_t asize", - "size_t size" - ], + "decl": ["char * ptr", "size_t reserved", "size_t size"], "type": "struct", "value": "git_buf", - "file": "buffer.h", - "line": 52, + "file": "git2/buffer.h", + "line": 36, "lineto": 55, - "block": "char * ptr\nsize_t asize\nsize_t size", + "block": "char * ptr\nsize_t reserved\nsize_t size", "tdef": "typedef", "description": " A data buffer for exporting data from libgit2", - "comments": "

Sometimes libgit2 wants to return an allocated data buffer to the caller and have the caller take responsibility for freeing that memory. This can be awkward if the caller does not have easy access to the same allocation functions that libgit2 is using. In those cases, libgit2 will fill in a git_buf and the caller can use git_buf_free() to release it when they are done.

\n\n

A git_buf may also be used for the caller to pass in a reference to a block of memory they hold. In this case, libgit2 will not resize or free the memory, but will read from it as needed.

\n\n

A git_buf is a public structure with three fields:

\n\n\n\n

Some APIs may occasionally do something slightly unusual with a buffer, such as setting ptr to a value that was passed in by the user. In those cases, the behavior will be clearly documented by the API.

\n", + "comments": "

Sometimes libgit2 wants to return an allocated data buffer to the caller and have the caller take responsibility for freeing that memory. To make ownership clear in these cases, libgit2 uses git_buf to return this data. Callers should use git_buf_dispose() to release the memory when they are done.

\n\n

A git_buf contains a pointer to a NUL-terminated C string, and the length of the string (not including the NUL terminator).

\n", "fields": [ { "type": "char *", "name": "ptr", - "comments": "" + "comments": " The buffer contents. `ptr` points to the start of the buffer\n being returned. The buffer's length (in bytes) is specified\n by the `size` member of the structure, and contains a NUL\n terminator at position `(size + 1)`." }, { "type": "size_t", - "name": "asize", - "comments": "" + "name": "reserved", + "comments": " This field is reserved and unused." }, { "type": "size_t", "name": "size", - "comments": "" + "comments": " The length (in bytes) of the buffer pointed to by `ptr`,\n not including a NUL terminator." } ], "used": { "returns": [], "needs": [ + "git_blob_filter", "git_blob_filtered_content", + "git_branch_remote_name", + "git_branch_upstream_merge", + "git_branch_upstream_name", + "git_branch_upstream_remote", "git_buf_contains_nul", + "git_buf_dispose", "git_buf_free", "git_buf_grow", "git_buf_is_binary", @@ -26921,6 +25708,7 @@ "git_commit_create_buffer", "git_commit_extract_signature", "git_commit_header_field", + "git_commit_signing_cb", "git_config_find_global", "git_config_find_programdata", "git_config_find_system", @@ -26933,14 +25721,15 @@ "git_diff_format_email", "git_diff_stats_to_buf", "git_diff_to_buf", - "git_filter_apply_fn", + "git_email_create_from_commit", "git_filter_list_apply_to_blob", + "git_filter_list_apply_to_buffer", "git_filter_list_apply_to_data", "git_filter_list_apply_to_file", "git_filter_list_stream_data", - "git_mempack_dump", "git_message_prettify", "git_object_short_id", + "git_packbuilder_write_buf", "git_patch_to_buf", "git_refspec_rtransform", "git_refspec_transform", @@ -26950,6 +25739,7 @@ "git_repository_message", "git_submodule_resolve_url", "git_treebuilder_write_with_buffer", + "git_url_resolve_cb", "git_worktree_is_locked" ] } @@ -26958,14 +25748,12 @@ [ "git_cert", { - "decl": [ - "git_cert_t cert_type" - ], + "decl": "git_cert", "type": "struct", "value": "git_cert", - "file": "types.h", - "line": 318, - "lineto": 323, + 
"file": "git2/types.h", + "line": 278, + "lineto": 278, "block": "git_cert_t cert_type", "tdef": "typedef", "description": " Parent type for `git_cert_hostkey` and `git_cert_x509`.", @@ -26979,10 +25767,7 @@ ], "used": { "returns": [], - "needs": [ - "git_transport_certificate_check_cb", - "git_transport_smart_certificate_check" - ] + "needs": ["git_transport_certificate_check_cb"] } } ], @@ -26993,14 +25778,18 @@ "git_cert parent", "git_cert_ssh_t type", "unsigned char [16] hash_md5", - "unsigned char [20] hash_sha1" + "unsigned char [20] hash_sha1", + "unsigned char [32] hash_sha256", + "git_cert_ssh_raw_type_t raw_type", + "const char * hostkey", + "size_t hostkey_len" ], "type": "struct", "value": "git_cert_hostkey", - "file": "transport.h", - "line": 39, - "lineto": 59, - "block": "git_cert parent\ngit_cert_ssh_t type\nunsigned char [16] hash_md5\nunsigned char [20] hash_sha1", + "file": "git2/cert.h", + "line": 108, + "lineto": 151, + "block": "git_cert parent\ngit_cert_ssh_t type\nunsigned char [16] hash_md5\nunsigned char [20] hash_sha1\nunsigned char [32] hash_sha256\ngit_cert_ssh_raw_type_t raw_type\nconst char * hostkey\nsize_t hostkey_len", "tdef": "typedef", "description": " Hostkey information taken from libssh2", "comments": "", @@ -27008,28 +25797,45 @@ { "type": "git_cert", "name": "parent", - "comments": "" + "comments": " The parent cert " }, { "type": "git_cert_ssh_t", "name": "type", - "comments": " A hostkey type from libssh2, either\n `GIT_CERT_SSH_MD5` or `GIT_CERT_SSH_SHA1`" + "comments": " A bitmask containing the available fields." }, { "type": "unsigned char [16]", "name": "hash_md5", - "comments": " Hostkey hash. If type has `GIT_CERT_SSH_MD5` set, this will\n have the MD5 hash of the hostkey." + "comments": " Hostkey hash. If `type` has `GIT_CERT_SSH_MD5` set, this will\n have the MD5 hash of the hostkey." }, { "type": "unsigned char [20]", "name": "hash_sha1", - "comments": " Hostkey hash. 
If type has `GIT_CERT_SSH_SHA1` set, this will\n have the SHA-1 hash of the hostkey." + "comments": " Hostkey hash. If `type` has `GIT_CERT_SSH_SHA1` set, this will\n have the SHA-1 hash of the hostkey." + }, + { + "type": "unsigned char [32]", + "name": "hash_sha256", + "comments": " Hostkey hash. If `type` has `GIT_CERT_SSH_SHA256` set, this will\n have the SHA-256 hash of the hostkey." + }, + { + "type": "git_cert_ssh_raw_type_t", + "name": "raw_type", + "comments": " Raw hostkey type. If `type` has `GIT_CERT_SSH_RAW` set, this will\n have the type of the raw hostkey." + }, + { + "type": "const char *", + "name": "hostkey", + "comments": " Pointer to the raw hostkey. If `type` has `GIT_CERT_SSH_RAW` set,\n this will have the raw contents of the hostkey." + }, + { + "type": "size_t", + "name": "hostkey_len", + "comments": " Raw hostkey length. If `type` has `GIT_CERT_SSH_RAW` set, this will\n have the length of the raw contents of the hostkey." } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27037,13 +25843,15 @@ { "decl": [ "GIT_CERT_SSH_MD5", - "GIT_CERT_SSH_SHA1" + "GIT_CERT_SSH_SHA1", + "GIT_CERT_SSH_SHA256", + "GIT_CERT_SSH_RAW" ], "type": "enum", - "file": "transport.h", - "line": 29, - "lineto": 34, - "block": "GIT_CERT_SSH_MD5\nGIT_CERT_SSH_SHA1", + "file": "git2/cert.h", + "line": 77, + "lineto": 86, + "block": "GIT_CERT_SSH_MD5\nGIT_CERT_SSH_SHA1\nGIT_CERT_SSH_SHA256\nGIT_CERT_SSH_RAW", "tdef": "typedef", "description": " Type of SSH host fingerprint", "comments": "", @@ -27059,12 +25867,21 @@ "name": "GIT_CERT_SSH_SHA1", "comments": "

SHA-1 is available

\n", "value": 2 + }, + { + "type": "int", + "name": "GIT_CERT_SSH_SHA256", + "comments": "

SHA-256 is available

\n", + "value": 4 + }, + { + "type": "int", + "name": "GIT_CERT_SSH_RAW", + "comments": "

Raw hostkey is available

\n", + "value": 8 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27077,9 +25894,9 @@ "GIT_CERT_STRARRAY" ], "type": "enum", - "file": "types.h", - "line": 290, - "lineto": 313, + "file": "git2/cert.h", + "line": 25, + "lineto": 48, "block": "GIT_CERT_NONE\nGIT_CERT_X509\nGIT_CERT_HOSTKEY_LIBSSH2\nGIT_CERT_STRARRAY\nGIT_CERT_NONE\nGIT_CERT_X509\nGIT_CERT_HOSTKEY_LIBSSH2\nGIT_CERT_STRARRAY", "tdef": "typedef", "description": " Type of host certificate structure that is passed to the check callback", @@ -27110,25 +25927,18 @@ "value": 3 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_cert_x509", { - "decl": [ - "git_cert parent", - "void * data", - "size_t len" - ], + "decl": ["git_cert parent", "void * data", "size_t len"], "type": "struct", "value": "git_cert_x509", - "file": "transport.h", - "line": 64, - "lineto": 74, + "file": "git2/cert.h", + "line": 156, + "lineto": 168, "block": "git_cert parent\nvoid * data\nsize_t len", "tdef": "typedef", "description": " X.509 certificate information", @@ -27137,7 +25947,7 @@ { "type": "git_cert", "name": "parent", - "comments": "" + "comments": " The parent cert " }, { "type": "void *", @@ -27150,10 +25960,7 @@ "comments": " Length of the memory block pointed to by `data`." } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27169,13 +25976,13 @@ "GIT_CHECKOUT_NOTIFY_ALL" ], "type": "enum", - "file": "checkout.h", - "line": 205, - "lineto": 214, + "file": "git2/checkout.h", + "line": 224, + "lineto": 255, "block": "GIT_CHECKOUT_NOTIFY_NONE\nGIT_CHECKOUT_NOTIFY_CONFLICT\nGIT_CHECKOUT_NOTIFY_DIRTY\nGIT_CHECKOUT_NOTIFY_UPDATED\nGIT_CHECKOUT_NOTIFY_UNTRACKED\nGIT_CHECKOUT_NOTIFY_IGNORED\nGIT_CHECKOUT_NOTIFY_ALL", "tdef": "typedef", "description": " Checkout notification flags", - "comments": "

Checkout will invoke an options notification callback (notify_cb) for certain cases - you pick which ones via notify_flags:

\n\n\n\n

Returning a non-zero value from this callback will cancel the checkout. The non-zero return value will be propagated back and returned by the git_checkout_... call.

\n\n

Notification callbacks are made prior to modifying any files on disk, so canceling on any notification will still happen prior to any files being modified.

\n", + "comments": "

Checkout will invoke an options notification callback (notify_cb) for certain cases - you pick which ones via notify_flags:

\n\n

Returning a non-zero value from this callback will cancel the checkout. The non-zero return value will be propagated back and returned by the git_checkout_... call.

\n\n

Notification callbacks are made prior to modifying any files on disk, so canceling on any notification will still happen prior to any files being modified.

\n", "fields": [ { "type": "int", @@ -27186,46 +25993,41 @@ { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_CONFLICT", - "comments": "", + "comments": "

Invokes checkout on conflicting paths.

\n", "value": 1 }, { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_DIRTY", - "comments": "", + "comments": "

Notifies about "dirty" files, i.e. those that do not need an update\n but no longer match the baseline. Core git displays these files when\n checkout runs, but won't stop the checkout.

\n", "value": 2 }, { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_UPDATED", - "comments": "", + "comments": "

Sends notification for any file changed.

\n", "value": 4 }, { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_UNTRACKED", - "comments": "", + "comments": "

Notifies about untracked files.

\n", "value": 8 }, { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_IGNORED", - "comments": "", + "comments": "

Notifies about ignored files.

\n", "value": 16 }, { "type": "int", "name": "GIT_CHECKOUT_NOTIFY_ALL", - "comments": "", + "comments": "

Notifies about ignored files.

\n", "value": 65535 } ], - "used": { - "returns": [], - "needs": [ - "git_checkout_notify_cb" - ] - } + "used": { "returns": [], "needs": ["git_checkout_notify_cb"] } } ], [ @@ -27255,23 +26057,23 @@ ], "type": "struct", "value": "git_checkout_options", - "file": "checkout.h", - "line": 251, - "lineto": 295, + "file": "git2/checkout.h", + "line": 317, + "lineto": 391, "block": "unsigned int version\nunsigned int checkout_strategy\nint disable_filters\nunsigned int dir_mode\nunsigned int file_mode\nint file_open_flags\nunsigned int notify_flags\ngit_checkout_notify_cb notify_cb\nvoid * notify_payload\ngit_checkout_progress_cb progress_cb\nvoid * progress_payload\ngit_strarray paths\ngit_tree * baseline\ngit_index * baseline_index\nconst char * target_directory\nconst char * ancestor_label\nconst char * our_label\nconst char * their_label\ngit_checkout_perfdata_cb perfdata_cb\nvoid * perfdata_payload", "tdef": "typedef", "description": " Checkout options structure", - "comments": "

Zero out for defaults. Initialize with GIT_CHECKOUT_OPTIONS_INIT macro to correctly set the version field. E.g.

\n\n
    git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT;\n
\n", + "comments": "

Initialize with GIT_CHECKOUT_OPTIONS_INIT. Alternatively, you can use git_checkout_options_init.

\n\n

[version] GIT_CHECKOUT_OPTIONS_VERSION [init_macro] GIT_CHECKOUT_OPTIONS_INIT [init_function] git_checkout_options_init

\n", "fields": [ { "type": "unsigned int", "name": "version", - "comments": "" + "comments": " The version " }, { "type": "unsigned int", "name": "checkout_strategy", - "comments": " default will be a dry run " + "comments": " default will be a safe checkout " }, { "type": "int", @@ -27296,17 +26098,17 @@ { "type": "unsigned int", "name": "notify_flags", - "comments": " see `git_checkout_notify_t` above " + "comments": " Checkout notification flags specify what operations the notify\n callback is invoked for.\n\n \n\n[flags] git_checkout_notify_t" }, { "type": "git_checkout_notify_cb", "name": "notify_cb", - "comments": "" + "comments": " Optional callback to get notifications on specific file states.\n " }, { "type": "void *", "name": "notify_payload", - "comments": "" + "comments": " Payload passed to notify_cb " }, { "type": "git_checkout_progress_cb", @@ -27316,22 +26118,22 @@ { "type": "void *", "name": "progress_payload", - "comments": "" + "comments": " Payload passed to progress_cb " }, { "type": "git_strarray", "name": "paths", - "comments": " When not zeroed out, array of fnmatch patterns specifying which\n paths should be taken into account, otherwise all files. Use\n GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH to treat as simple list." + "comments": " A list of wildmatch patterns or paths.\n\n By default, all paths are processed. If you pass an array of wildmatch\n patterns, those will be used to filter which paths should be taken into\n account.\n\n Use GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH to treat as a simple list." }, { "type": "git_tree *", "name": "baseline", - "comments": " The expected content of the working directory; defaults to HEAD.\n If the working directory does not match this baseline information,\n that will produce a checkout conflict." + "comments": " The expected content of the working directory; defaults to HEAD.\n\n If the working directory does not match this baseline information,\n that will produce a checkout conflict." 
}, { "type": "git_index *", "name": "baseline_index", - "comments": " expected content of workdir, expressed as an index. " + "comments": " Like `baseline` above, though expressed as an index. This\n option overrides `baseline`." }, { "type": "const char *", @@ -27361,7 +26163,7 @@ { "type": "void *", "name": "perfdata_payload", - "comments": "" + "comments": " Payload passed to perfdata_cb " } ], "used": { @@ -27369,7 +26171,7 @@ "needs": [ "git_checkout_head", "git_checkout_index", - "git_checkout_init_options", + "git_checkout_options_init", "git_checkout_tree", "git_merge", "git_reset", @@ -27378,11 +26180,35 @@ } } ], + [ + "git_checkout_perfdata", + { + "decl": [ + "size_t mkdir_calls", + "size_t stat_calls", + "size_t chmod_calls" + ], + "type": "struct", + "value": "git_checkout_perfdata", + "file": "git2/checkout.h", + "line": 258, + "lineto": 262, + "block": "size_t mkdir_calls\nsize_t stat_calls\nsize_t chmod_calls", + "tdef": "typedef", + "description": " Checkout performance-reporting structure ", + "comments": "", + "fields": [ + { "type": "size_t", "name": "mkdir_calls", "comments": "" }, + { "type": "size_t", "name": "stat_calls", "comments": "" }, + { "type": "size_t", "name": "chmod_calls", "comments": "" } + ], + "used": { "returns": [], "needs": ["git_checkout_perfdata_cb"] } + } + ], [ "git_checkout_strategy_t", { "decl": [ - "GIT_CHECKOUT_NONE", "GIT_CHECKOUT_SAFE", "GIT_CHECKOUT_FORCE", "GIT_CHECKOUT_RECREATE_MISSING", @@ -27402,34 +26228,31 @@ "GIT_CHECKOUT_CONFLICT_STYLE_DIFF3", "GIT_CHECKOUT_DONT_REMOVE_EXISTING", "GIT_CHECKOUT_DONT_WRITE_INDEX", + "GIT_CHECKOUT_DRY_RUN", + "GIT_CHECKOUT_CONFLICT_STYLE_ZDIFF3", + "GIT_CHECKOUT_NONE", "GIT_CHECKOUT_UPDATE_SUBMODULES", "GIT_CHECKOUT_UPDATE_SUBMODULES_IF_CHANGED" ], "type": "enum", - "file": "checkout.h", - "line": 106, - "lineto": 177, - "block": 
"GIT_CHECKOUT_NONE\nGIT_CHECKOUT_SAFE\nGIT_CHECKOUT_FORCE\nGIT_CHECKOUT_RECREATE_MISSING\nGIT_CHECKOUT_ALLOW_CONFLICTS\nGIT_CHECKOUT_REMOVE_UNTRACKED\nGIT_CHECKOUT_REMOVE_IGNORED\nGIT_CHECKOUT_UPDATE_ONLY\nGIT_CHECKOUT_DONT_UPDATE_INDEX\nGIT_CHECKOUT_NO_REFRESH\nGIT_CHECKOUT_SKIP_UNMERGED\nGIT_CHECKOUT_USE_OURS\nGIT_CHECKOUT_USE_THEIRS\nGIT_CHECKOUT_DISABLE_PATHSPEC_MATCH\nGIT_CHECKOUT_SKIP_LOCKED_DIRECTORIES\nGIT_CHECKOUT_DONT_OVERWRITE_IGNORED\nGIT_CHECKOUT_CONFLICT_STYLE_MERGE\nGIT_CHECKOUT_CONFLICT_STYLE_DIFF3\nGIT_CHECKOUT_DONT_REMOVE_EXISTING\nGIT_CHECKOUT_DONT_WRITE_INDEX\nGIT_CHECKOUT_UPDATE_SUBMODULES\nGIT_CHECKOUT_UPDATE_SUBMODULES_IF_CHANGED", + "file": "git2/checkout.h", + "line": 113, + "lineto": 206, + "block": "GIT_CHECKOUT_SAFE\nGIT_CHECKOUT_FORCE\nGIT_CHECKOUT_RECREATE_MISSING\nGIT_CHECKOUT_ALLOW_CONFLICTS\nGIT_CHECKOUT_REMOVE_UNTRACKED\nGIT_CHECKOUT_REMOVE_IGNORED\nGIT_CHECKOUT_UPDATE_ONLY\nGIT_CHECKOUT_DONT_UPDATE_INDEX\nGIT_CHECKOUT_NO_REFRESH\nGIT_CHECKOUT_SKIP_UNMERGED\nGIT_CHECKOUT_USE_OURS\nGIT_CHECKOUT_USE_THEIRS\nGIT_CHECKOUT_DISABLE_PATHSPEC_MATCH\nGIT_CHECKOUT_SKIP_LOCKED_DIRECTORIES\nGIT_CHECKOUT_DONT_OVERWRITE_IGNORED\nGIT_CHECKOUT_CONFLICT_STYLE_MERGE\nGIT_CHECKOUT_CONFLICT_STYLE_DIFF3\nGIT_CHECKOUT_DONT_REMOVE_EXISTING\nGIT_CHECKOUT_DONT_WRITE_INDEX\nGIT_CHECKOUT_DRY_RUN\nGIT_CHECKOUT_CONFLICT_STYLE_ZDIFF3\nGIT_CHECKOUT_NONE\nGIT_CHECKOUT_UPDATE_SUBMODULES\nGIT_CHECKOUT_UPDATE_SUBMODULES_IF_CHANGED", "tdef": "typedef", "description": " Checkout behavior flags", - "comments": "

In libgit2, checkout is used to update the working directory and index to match a target tree. Unlike git checkout, it does not move the HEAD commit for you - use git_repository_set_head or the like to do that.

\n\n

Checkout looks at (up to) four things: the "target" tree you want to check out, the "baseline" tree of what was checked out previously, the working directory for actual files, and the index for staged changes.

\n\n

You give checkout one of three strategies for update:

\n\n\n\n

To emulate git checkout, use GIT_CHECKOUT_SAFE with a checkout notification callback (see below) that displays information about dirty files. The default behavior will cancel checkout on conflicts.

\n\n

To emulate git checkout-index, use GIT_CHECKOUT_SAFE with a notification callback that cancels the operation if a dirty-but-existing file is found in the working directory. This core git command isn't quite "force" but is sensitive about some types of changes.

\n\n

To emulate git checkout -f, use GIT_CHECKOUT_FORCE.

\n\n

There are some additional flags to modified the behavior of checkout:

\n\n\n", + "comments": "

In libgit2, checkout is used to update the working directory and index to match a target tree. Unlike git checkout, it does not move the HEAD commit for you - use git_repository_set_head or the like to do that.

\n\n

Checkout looks at (up to) four things: the "target" tree you want to check out, the "baseline" tree of what was checked out previously, the working directory for actual files, and the index for staged changes.

\n\n

You give checkout one of two strategies for update:

\n\n\n\n

To emulate git checkout, use GIT_CHECKOUT_SAFE with a checkout notification callback (see below) that displays information about dirty files. The default behavior will cancel checkout on conflicts.

\n\n

To emulate git checkout-index, use GIT_CHECKOUT_SAFE with a notification callback that cancels the operation if a dirty-but-existing file is found in the working directory. This core git command isn't quite "force" but is sensitive about some types of changes.

\n\n

To emulate git checkout -f, use GIT_CHECKOUT_FORCE.

\n\n

There are some additional flags to modify the behavior of checkout:

\n\n\n", "fields": [ - { - "type": "int", - "name": "GIT_CHECKOUT_NONE", - "comments": "

default is a dry run, no actual updates

\n", - "value": 0 - }, { "type": "int", "name": "GIT_CHECKOUT_SAFE", - "comments": "

Allow safe updates that cannot overwrite uncommitted data

\n", - "value": 1 + "comments": "

Allow safe updates that cannot overwrite uncommitted data.\n If the uncommitted changes don't conflict with the checked\n out files, the checkout will still proceed, leaving the\n changes intact.

\n", + "value": 0 }, { "type": "int", "name": "GIT_CHECKOUT_FORCE", - "comments": "

Allow all updates to force working directory to look like index

\n", + "comments": "

Allow all updates to force working directory to look like\n the index, potentially losing data in the process.

\n", "value": 2 }, { @@ -27534,6 +26357,24 @@ "comments": "

Normally checkout writes the index upon completion; this prevents that.

\n", "value": 8388608 }, + { + "type": "int", + "name": "GIT_CHECKOUT_DRY_RUN", + "comments": "

Perform a "dry run", reporting what would be done but\n without actually making changes in the working directory\n or the index.

\n", + "value": 16777216 + }, + { + "type": "int", + "name": "GIT_CHECKOUT_CONFLICT_STYLE_ZDIFF3", + "comments": "

Include common ancestor data in zdiff3 format for conflicts

\n", + "value": 33554432 + }, + { + "type": "int", + "name": "GIT_CHECKOUT_NONE", + "comments": "

Do not do a checkout and do not fire callbacks; this is primarily\n useful only for internal functions that will perform the\n checkout themselves but need to pass checkout options into\n another function, for example, git_clone.

\n", + "value": 1073741824 + }, { "type": "int", "name": "GIT_CHECKOUT_UPDATE_SUBMODULES", @@ -27547,10 +26388,7 @@ "value": 131072 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27564,19 +26402,15 @@ ], "type": "struct", "value": "git_cherrypick_options", - "file": "cherrypick.h", - "line": 26, - "lineto": 34, + "file": "git2/cherrypick.h", + "line": 29, + "lineto": 37, "block": "unsigned int version\nunsigned int mainline\ngit_merge_options merge_opts\ngit_checkout_options checkout_opts", "tdef": "typedef", "description": " Cherry-pick options", "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", "name": "mainline", @@ -27595,10 +26429,7 @@ ], "used": { "returns": [], - "needs": [ - "git_cherrypick", - "git_cherrypick_init_options" - ] + "needs": ["git_cherrypick", "git_cherrypick_options_init"] } } ], @@ -27612,9 +26443,9 @@ "GIT_CLONE_LOCAL_NO_LINKS" ], "type": "enum", - "file": "clone.h", - "line": 33, - "lineto": 53, + "file": "git2/clone.h", + "line": 37, + "lineto": 57, "block": "GIT_CLONE_LOCAL_AUTO\nGIT_CLONE_LOCAL\nGIT_CLONE_NO_LOCAL\nGIT_CLONE_LOCAL_NO_LINKS", "tdef": "typedef", "description": " Options for bypassing the git-aware transport on clone. Bypassing\n it means that instead of a fetch, libgit2 will copy the object\n database directory instead of figuring out what it needs, which is\n faster. 
If possible, it will hardlink the files to save space.", @@ -27645,10 +26476,7 @@ "value": 3 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27668,23 +26496,19 @@ ], "type": "struct", "value": "git_clone_options", - "file": "clone.h", - "line": 103, - "lineto": 164, + "file": "git2/clone.h", + "line": 110, + "lineto": 171, "block": "unsigned int version\ngit_checkout_options checkout_opts\ngit_fetch_options fetch_opts\nint bare\ngit_clone_local_t local\nconst char * checkout_branch\ngit_repository_create_cb repository_cb\nvoid * repository_cb_payload\ngit_remote_create_cb remote_cb\nvoid * remote_cb_payload", "tdef": "typedef", "description": " Clone options structure", - "comments": "

Use the GIT_CLONE_OPTIONS_INIT to get the default settings, like this:

\n\n
    git_clone_options opts = GIT_CLONE_OPTIONS_INIT;\n
\n", + "comments": "

Initialize with GIT_CLONE_OPTIONS_INIT. Alternatively, you can use git_clone_options_init.

\n\n

[version] GIT_CLONE_OPTIONS_VERSION [init_macro] GIT_CLONE_OPTIONS_INIT [init_function] git_clone_options_init

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "git_checkout_options", "name": "checkout_opts", - "comments": " These options are passed to the checkout step. To disable\n checkout, set the `checkout_strategy` to\n `GIT_CHECKOUT_NONE`." + "comments": " These options are passed to the checkout step. To disable\n checkout, set the `checkout_strategy` to `GIT_CHECKOUT_NONE`\n or `GIT_CHECKOUT_DRY_RUN`." }, { "type": "git_fetch_options", @@ -27729,10 +26553,7 @@ ], "used": { "returns": [], - "needs": [ - "git_clone", - "git_clone_init_options" - ] + "needs": ["git_clone", "git_clone_options_init"] } } ], @@ -27742,9 +26563,9 @@ "decl": "git_commit", "type": "struct", "value": "git_commit", - "file": "types.h", - "line": 123, - "lineto": 123, + "file": "git2/types.h", + "line": 141, + "lineto": 141, "tdef": "typedef", "description": " Parsed representation of a commit object. 
", "comments": "", @@ -27756,11 +26577,14 @@ "git_cherrypick_commit", "git_commit_amend", "git_commit_author", + "git_commit_author_with_mailmap", "git_commit_body", "git_commit_committer", + "git_commit_committer_with_mailmap", "git_commit_create", "git_commit_create_buffer", - "git_commit_create_from_callback", + "git_commit_create_cb", + "git_commit_create_from_stage", "git_commit_dup", "git_commit_free", "git_commit_header_field", @@ -27781,40 +26605,116 @@ "git_commit_time_offset", "git_commit_tree", "git_commit_tree_id", + "git_commitarray_dispose", "git_diff_commit_as_email", + "git_email_create_from_commit", "git_merge_commits", - "git_note_commit_create", - "git_note_commit_iterator_new", - "git_note_commit_read", - "git_note_commit_remove", + "git_odb_set_commit_graph", + "git_repository_commit_parents", "git_revert", "git_revert_commit" ] } } ], + [ + "git_commit_graph", + { + "decl": "git_commit_graph", + "type": "struct", + "value": "git_commit_graph", + "file": "git2/types.h", + "line": 114, + "lineto": 114, + "tdef": "typedef", + "description": " A git commit-graph ", + "comments": "", + "used": { "returns": [], "needs": ["git_odb_set_commit_graph"] } + } + ], + [ + "git_commit_graph_split_strategy_t", + { + "decl": ["GIT_COMMIT_GRAPH_SPLIT_STRATEGY_SINGLE_FILE"], + "type": "enum", + "file": "git2/sys/commit_graph.h", + "line": 93, + "lineto": 99, + "block": "GIT_COMMIT_GRAPH_SPLIT_STRATEGY_SINGLE_FILE", + "tdef": "typedef", + "description": " The strategy to use when adding a new set of commits to a pre-existing\n commit-graph chain.", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_COMMIT_GRAPH_SPLIT_STRATEGY_SINGLE_FILE", + "comments": "

Do not split commit-graph files. The other split strategy-related option\n fields are ignored.

\n", + "value": 0 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_commit_graph_writer", + { + "decl": "git_commit_graph_writer", + "type": "struct", + "value": "git_commit_graph_writer", + "file": "git2/types.h", + "line": 117, + "lineto": 117, + "tdef": "typedef", + "description": " a writer for commit-graph files. ", + "comments": "", + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_commitarray", + { + "decl": ["git_commit *const * commits", "size_t count"], + "type": "struct", + "value": "git_commitarray", + "file": "git2/commit.h", + "line": 655, + "lineto": 658, + "block": "git_commit *const * commits\nsize_t count", + "tdef": "typedef", + "description": " An array of commits returned from the library ", + "comments": "", + "fields": [ + { "type": "git_commit *const *", "name": "commits", "comments": "" }, + { "type": "size_t", "name": "count", "comments": "" } + ], + "used": { + "returns": [], + "needs": ["git_commitarray_dispose", "git_repository_commit_parents"] + } + } + ], [ "git_config", { "decl": "git_config", "type": "struct", "value": "git_config", - "file": "types.h", - "line": 141, - "lineto": 141, + "file": "git2/types.h", + "line": 162, + "lineto": 162, "tdef": "typedef", "description": " Memory representation of a set of config files ", "comments": "", "used": { "returns": [], "needs": [ - "git_config_add_backend", "git_config_add_file_ondisk", "git_config_backend_foreach_match", "git_config_delete_entry", "git_config_delete_multivar", "git_config_entry_free", "git_config_foreach", + "git_config_foreach_cb", "git_config_foreach_match", "git_config_free", "git_config_get_bool", @@ -27826,11 +26726,11 @@ "git_config_get_path", "git_config_get_string", "git_config_get_string_buf", - "git_config_init_backend", "git_config_iterator_free", "git_config_iterator_glob_new", "git_config_iterator_new", "git_config_lock", + "git_config_lookup_map_value", "git_config_multivar_iterator_new", "git_config_new", 
"git_config_next", @@ -27843,10 +26743,10 @@ "git_config_set_int64", "git_config_set_multivar", "git_config_set_string", + "git_config_set_writeorder", "git_config_snapshot", "git_repository_config", - "git_repository_config_snapshot", - "git_repository_set_config" + "git_repository_config_snapshot" ] } } @@ -27857,93 +26757,46 @@ "decl": "git_config_backend", "type": "struct", "value": "git_config_backend", - "file": "types.h", - "line": 144, - "lineto": 144, - "block": "unsigned int version\nint readonly\nstruct git_config * cfg\nint (*)(struct git_config_backend *, git_config_level_t, const git_repository *) open\nint (*)(struct git_config_backend *, const char *, git_config_entry **) get\nint (*)(struct git_config_backend *, const char *, const char *) set\nint (*)(git_config_backend *, const char *, const char *, const char *) set_multivar\nint (*)(struct git_config_backend *, const char *) del\nint (*)(struct git_config_backend *, const char *, const char *) del_multivar\nint (*)(git_config_iterator **, struct git_config_backend *) iterator\nint (*)(struct git_config_backend **, struct git_config_backend *) snapshot\nint (*)(struct git_config_backend *) lock\nint (*)(struct git_config_backend *, int) unlock\nvoid (*)(struct git_config_backend *) free", + "file": "git2/types.h", + "line": 165, + "lineto": 165, "tdef": "typedef", "description": " Interface to access a configuration file ", "comments": "", + "used": { "returns": [], "needs": ["git_config_backend_foreach_match"] } + } + ], + [ + "git_config_backend_memory_options", + { + "decl": [ + "unsigned int version", + "const char * backend_type", + "const char * origin_path" + ], + "type": "struct", + "value": "git_config_backend_memory_options", + "file": "git2/sys/config.h", + "line": 148, + "lineto": 162, + "block": "unsigned int version\nconst char * backend_type\nconst char * origin_path", + "tdef": "typedef", + "description": " Options for in-memory configuration backends. 
", + "comments": "", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { - "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "int", - "name": "readonly", - "comments": " True if this backend is for a snapshot " - }, - { - "type": "struct git_config *", - "name": "cfg", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend *, git_config_level_t, const git_repository *)", - "name": "open", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend *, const char *, git_config_entry **)", - "name": "get", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend *, const char *, const char *)", - "name": "set", - "comments": "" - }, - { - "type": "int (*)(git_config_backend *, const char *, const char *, const char *)", - "name": "set_multivar", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend *, const char *)", - "name": "del", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend *, const char *, const char *)", - "name": "del_multivar", - "comments": "" - }, - { - "type": "int (*)(git_config_iterator **, struct git_config_backend *)", - "name": "iterator", - "comments": "" - }, - { - "type": "int (*)(struct git_config_backend **, struct git_config_backend *)", - "name": "snapshot", - "comments": " Produce a read-only version of this backend " - }, - { - "type": "int (*)(struct git_config_backend *)", - "name": "lock", - "comments": " Lock this backend.\n\n Prevent any writes to the data store backing this\n backend. Any updates must not be visible to any other\n readers." - }, - { - "type": "int (*)(struct git_config_backend *, int)", - "name": "unlock", - "comments": " Unlock the data store backing this backend. If success is\n true, the changes should be committed, otherwise rolled\n back." + "type": "const char *", + "name": "backend_type", + "comments": " The type of this backend (eg, \"command line\"). 
If this is\n NULL, then this will be \"in-memory\"." }, { - "type": "void (*)(struct git_config_backend *)", - "name": "free", - "comments": "" + "type": "const char *", + "name": "origin_path", + "comments": " The path to the origin; if this is NULL then it will be\n left unset in the resulting configuration entries." } ], - "used": { - "returns": [], - "needs": [ - "git_config_add_backend", - "git_config_backend_foreach_match", - "git_config_init_backend" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -27952,16 +26805,17 @@ "decl": [ "const char * name", "const char * value", - "git_config_level_t level", - "void (*)(struct git_config_entry *) free", - "void * payload" + "const char * backend_type", + "const char * origin_path", + "unsigned int include_depth", + "git_config_level_t level" ], "type": "struct", "value": "git_config_entry", - "file": "config.h", - "line": 64, - "lineto": 70, - "block": "const char * name\nconst char * value\ngit_config_level_t level\nvoid (*)(struct git_config_entry *) free\nvoid * payload", + "file": "git2/config.h", + "line": 103, + "lineto": 124, + "block": "const char * name\nconst char * value\nconst char * backend_type\nconst char * origin_path\nunsigned int include_depth\ngit_config_level_t level", "tdef": "typedef", "description": " An entry in a configuration file", "comments": "", @@ -27969,33 +26823,39 @@ { "type": "const char *", "name": "name", - "comments": " Name of the entry (normalised) " + "comments": " Name of the configuration entry (normalized). " }, { "type": "const char *", "name": "value", - "comments": " String value of the entry " + "comments": " Literal (string) value of the entry. " }, { - "type": "git_config_level_t", - "name": "level", - "comments": " Which config file this was found in " + "type": "const char *", + "name": "backend_type", + "comments": " The type of backend that this entry exists in (eg, \"file\"). 
" }, { - "type": "void (*)(struct git_config_entry *)", - "name": "free", - "comments": " Free function for this entry " + "type": "const char *", + "name": "origin_path", + "comments": " The path to the origin of this entry. For config files, this is\n the path to the file." }, { - "type": "void *", - "name": "payload", - "comments": " Opaque value for the free function. Do not read or write " + "type": "unsigned int", + "name": "include_depth", + "comments": " Depth of includes where this variable was found. " + }, + { + "type": "git_config_level_t", + "name": "level", + "comments": " Configuration level for the file this was found in. " } ], "used": { "returns": [], "needs": [ "git_config_entry_free", + "git_config_foreach_cb", "git_config_get_entry", "git_config_next" ] @@ -28005,43 +26865,15 @@ [ "git_config_iterator", { - "decl": [ - "git_config_backend * backend", - "unsigned int flags", - "int (*)(git_config_entry **, git_config_iterator *) next", - "void (*)(git_config_iterator *) free" - ], + "decl": "git_config_iterator", "type": "struct", "value": "git_config_iterator", - "file": "sys/config.h", - "line": 34, - "lineto": 48, - "block": "git_config_backend * backend\nunsigned int flags\nint (*)(git_config_entry **, git_config_iterator *) next\nvoid (*)(git_config_iterator *) free", - "tdef": null, - "description": " Every iterator must have this struct as its first element, so the\n API can talk to it. You'd define your iterator as", - "comments": "
 struct my_iterator {             git_config_iterator parent;             ...     }\n
\n\n

and assign iter->parent.backend to your git_config_backend.

\n", - "fields": [ - { - "type": "git_config_backend *", - "name": "backend", - "comments": "" - }, - { - "type": "unsigned int", - "name": "flags", - "comments": "" - }, - { - "type": "int (*)(git_config_entry **, git_config_iterator *)", - "name": "next", - "comments": " Return the current entry and advance the iterator. The\n memory belongs to the library." - }, - { - "type": "void (*)(git_config_iterator *)", - "name": "free", - "comments": " Free the iterator" - } - ], + "file": "git2/config.h", + "line": 145, + "lineto": 145, + "tdef": "typedef", + "description": " An opaque structure for a configuration iterator.", + "comments": "", "used": { "returns": [], "needs": [ @@ -28063,515 +26895,347 @@ "GIT_CONFIG_LEVEL_XDG", "GIT_CONFIG_LEVEL_GLOBAL", "GIT_CONFIG_LEVEL_LOCAL", + "GIT_CONFIG_LEVEL_WORKTREE", "GIT_CONFIG_LEVEL_APP", "GIT_CONFIG_HIGHEST_LEVEL" ], "type": "enum", - "file": "config.h", - "line": 31, - "lineto": 59, - "block": "GIT_CONFIG_LEVEL_PROGRAMDATA\nGIT_CONFIG_LEVEL_SYSTEM\nGIT_CONFIG_LEVEL_XDG\nGIT_CONFIG_LEVEL_GLOBAL\nGIT_CONFIG_LEVEL_LOCAL\nGIT_CONFIG_LEVEL_APP\nGIT_CONFIG_HIGHEST_LEVEL", + "file": "git2/config.h", + "line": 49, + "lineto": 98, + "block": "GIT_CONFIG_LEVEL_PROGRAMDATA\nGIT_CONFIG_LEVEL_SYSTEM\nGIT_CONFIG_LEVEL_XDG\nGIT_CONFIG_LEVEL_GLOBAL\nGIT_CONFIG_LEVEL_LOCAL\nGIT_CONFIG_LEVEL_WORKTREE\nGIT_CONFIG_LEVEL_APP\nGIT_CONFIG_HIGHEST_LEVEL", "tdef": "typedef", - "description": " Priority level of a config file.\n These priority levels correspond to the natural escalation logic\n (from higher to lower) when searching for config entries in git.git.", - "comments": "

git_config_open_default() and git_repository_config() honor those priority levels as well.

\n", + "description": " Priority level of a config file.", + "comments": "

These priority levels correspond to the natural escalation logic (from higher to lower) when reading or searching for config entries in git.git. Meaning that for the same key, the configuration in the local configuration is preferred over the configuration in the system configuration file.

\n\n

Callers can add their own custom configuration, beginning at the GIT_CONFIG_LEVEL_APP level.

\n\n

Writes, by default, occur in the highest priority level backend that is writable. This ordering can be overridden with git_config_set_writeorder.

\n\n

git_config_open_default() and git_repository_config() honor those priority levels as well.

\n", "fields": [ { "type": "int", "name": "GIT_CONFIG_LEVEL_PROGRAMDATA", - "comments": "

System-wide on Windows, for compatibility with portable git

\n", + "comments": "

System-wide on Windows, for compatibility with "Portable Git".

\n", "value": 1 }, { "type": "int", "name": "GIT_CONFIG_LEVEL_SYSTEM", - "comments": "

System-wide configuration file; /etc/gitconfig on Linux systems

\n", + "comments": "

System-wide configuration file; /etc/gitconfig on Linux.

\n", "value": 2 }, { "type": "int", "name": "GIT_CONFIG_LEVEL_XDG", - "comments": "

XDG compatible configuration file; typically ~/.config/git/config

\n", + "comments": "

XDG compatible configuration file; typically\n ~/.config/git/config.

\n", "value": 3 }, { "type": "int", "name": "GIT_CONFIG_LEVEL_GLOBAL", - "comments": "

User-specific configuration file (also called Global configuration\n file); typically ~/.gitconfig

\n", + "comments": "

Global configuration file is the user-specific configuration;\n typically ~/.gitconfig.

\n", "value": 4 }, { "type": "int", "name": "GIT_CONFIG_LEVEL_LOCAL", - "comments": "

Repository specific configuration file; $WORK_DIR/.git/config on\n non-bare repos

\n", + "comments": "

Local configuration, the repository-specific configuration file;\n typically $GIT_DIR/config.

\n", "value": 5 }, { "type": "int", - "name": "GIT_CONFIG_LEVEL_APP", - "comments": "

Application specific configuration file; freely defined by applications

\n", + "name": "GIT_CONFIG_LEVEL_WORKTREE", + "comments": "

Worktree-specific configuration; typically\n $GIT_DIR/config.worktree.

\n", "value": 6 }, + { + "type": "int", + "name": "GIT_CONFIG_LEVEL_APP", + "comments": "

Application-specific configuration file. Callers into libgit2\n can add their own configuration beginning at this level.

\n", + "value": 7 + }, { "type": "int", "name": "GIT_CONFIG_HIGHEST_LEVEL", - "comments": "

Represents the highest level available config file (i.e. the most\n specific config file available that actually is loaded)

\n", + "comments": "

Not a configuration level; callers can use this value when\n querying configuration levels to specify that they want to\n have data from the highest-level currently configuration.\n This can be used to indicate that callers want the most\n specific config file available that actually is loaded.

\n", "value": -1 } ], "used": { "returns": [], "needs": [ - "git_config_add_backend", "git_config_add_file_ondisk", - "git_config_open_level" + "git_config_open_level", + "git_config_set_writeorder" ] } } ], [ - "git_cred_default", - { - "decl": "git_cred_default", - "type": "struct", - "value": "git_cred_default", - "file": "transport.h", - "line": 176, - "lineto": 176, - "tdef": "typedef", - "description": " A key for NTLM/Kerberos \"default\" credentials ", - "comments": "", - "used": { - "returns": [], - "needs": [] - } - } - ], - [ - "git_cred_ssh_custom", + "git_configmap", { "decl": [ - "git_cred parent", - "char * username", - "char * publickey", - "size_t publickey_len", - "git_cred_sign_callback sign_callback", - "void * payload" + "git_configmap_t type", + "const char * str_match", + "int map_value" ], "type": "struct", - "value": "git_cred_ssh_custom", - "file": "transport.h", - "line": 166, - "lineto": 173, - "block": "git_cred parent\nchar * username\nchar * publickey\nsize_t publickey_len\ngit_cred_sign_callback sign_callback\nvoid * payload", + "value": "git_configmap", + "file": "git2/config.h", + "line": 160, + "lineto": 164, + "block": "git_configmap_t type\nconst char * str_match\nint map_value", "tdef": "typedef", - "description": " A key with a custom signature function", - "comments": "", - "fields": [ - { - "type": "git_cred", - "name": "parent", - "comments": "" - }, - { - "type": "char *", - "name": "username", - "comments": "" - }, - { - "type": "char *", - "name": "publickey", - "comments": "" - }, - { - "type": "size_t", - "name": "publickey_len", - "comments": "" - }, - { - "type": "git_cred_sign_callback", - "name": "sign_callback", - "comments": "" - }, - { - "type": "void *", - "name": "payload", - "comments": "" - } + "description": " Mapping from config variables to values.", + "comments": "", + "fields": [ + { "type": "git_configmap_t", "name": "type", "comments": "" }, + { "type": "const char *", "name": "str_match", "comments": 
"" }, + { "type": "int", "name": "map_value", "comments": "" } ], "used": { "returns": [], - "needs": [] + "needs": ["git_config_get_mapped", "git_config_lookup_map_value"] } } ], [ - "git_cred_ssh_interactive", + "git_configmap_t", { "decl": [ - "git_cred parent", - "char * username", - "git_cred_ssh_interactive_callback prompt_callback", - "void * payload" + "GIT_CONFIGMAP_FALSE", + "GIT_CONFIGMAP_TRUE", + "GIT_CONFIGMAP_INT32", + "GIT_CONFIGMAP_STRING" ], - "type": "struct", - "value": "git_cred_ssh_interactive", - "file": "transport.h", - "line": 156, - "lineto": 161, - "block": "git_cred parent\nchar * username\ngit_cred_ssh_interactive_callback prompt_callback\nvoid * payload", + "type": "enum", + "file": "git2/config.h", + "line": 150, + "lineto": 155, + "block": "GIT_CONFIGMAP_FALSE\nGIT_CONFIGMAP_TRUE\nGIT_CONFIGMAP_INT32\nGIT_CONFIGMAP_STRING", "tdef": "typedef", - "description": " Keyboard-interactive based ssh authentication", + "description": " Config var type", "comments": "", "fields": [ { - "type": "git_cred", - "name": "parent", - "comments": "" + "type": "int", + "name": "GIT_CONFIGMAP_FALSE", + "comments": "", + "value": 0 }, { - "type": "char *", - "name": "username", - "comments": "" + "type": "int", + "name": "GIT_CONFIGMAP_TRUE", + "comments": "", + "value": 1 }, { - "type": "git_cred_ssh_interactive_callback", - "name": "prompt_callback", - "comments": "" + "type": "int", + "name": "GIT_CONFIGMAP_INT32", + "comments": "", + "value": 2 }, { - "type": "void *", - "name": "payload", - "comments": "" + "type": "int", + "name": "GIT_CONFIGMAP_STRING", + "comments": "", + "value": 3 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_credential", + { + "decl": "git_credential", + "type": "struct", + "value": "git_credential", + "file": "git2/credential.h", + "line": 87, + "lineto": 87, + "tdef": "typedef", + "description": " The base structure for all credential types", + "comments": "", "used": { "returns": [], "needs": [ - 
"git_cred_ssh_interactive_new" + "git_credential_acquire_cb", + "git_credential_default_new", + "git_credential_free", + "git_credential_get_username", + "git_credential_has_username", + "git_credential_ssh_custom_new", + "git_credential_ssh_interactive_new", + "git_credential_ssh_key_from_agent", + "git_credential_ssh_key_memory_new", + "git_credential_ssh_key_new", + "git_credential_username_new", + "git_credential_userpass", + "git_credential_userpass_plaintext_new" ] } } ], [ - "git_cred_ssh_key", + "git_credential_default", { - "decl": [ - "git_cred parent", - "char * username", - "char * publickey", - "char * privatekey", - "char * passphrase" - ], + "decl": "git_credential_default", "type": "struct", - "value": "git_cred_ssh_key", - "file": "transport.h", - "line": 145, - "lineto": 151, - "block": "git_cred parent\nchar * username\nchar * publickey\nchar * privatekey\nchar * passphrase", + "value": "git_credential_default", + "file": "git2/credential.h", + "line": 95, + "lineto": 95, "tdef": "typedef", - "description": " A ssh key from disk", + "description": " A key for NTLM/Kerberos \"default\" credentials ", "comments": "", - "fields": [ - { - "type": "git_cred", - "name": "parent", - "comments": "" - }, - { - "type": "char *", - "name": "username", - "comments": "" - }, - { - "type": "char *", - "name": "publickey", - "comments": "" - }, - { - "type": "char *", - "name": "privatekey", - "comments": "" - }, - { - "type": "char *", - "name": "passphrase", - "comments": "" - } - ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_cred_username", + "git_credential_ssh_custom", { - "decl": [ - "git_cred parent", - "char [1] username" - ], + "decl": "git_credential_ssh_custom", "type": "struct", - "value": "git_cred_username", - "file": "transport.h", - "line": 179, - "lineto": 182, - "block": "git_cred parent\nchar [1] username", + "value": "git_credential_ssh_custom", + "file": "git2/credential.h", + 
"line": 110, + "lineto": 110, "tdef": "typedef", - "description": " Username-only credential information ", + "description": " A key with a custom signature function", "comments": "", - "fields": [ - { - "type": "git_cred", - "name": "parent", - "comments": "" - }, - { - "type": "char [1]", - "name": "username", - "comments": "" - } - ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_cred_userpass_payload", + "git_credential_ssh_interactive", { - "decl": [ - "const char * username", - "const char * password" - ], + "decl": "git_credential_ssh_interactive", "type": "struct", - "value": "git_cred_userpass_payload", - "file": "cred_helpers.h", - "line": 24, - "lineto": 27, - "block": "const char * username\nconst char * password", + "value": "git_credential_ssh_interactive", + "file": "git2/credential.h", + "line": 105, + "lineto": 105, "tdef": "typedef", - "description": " Payload for git_cred_stock_userpass_plaintext.", + "description": " Keyboard-interactive based ssh authentication", "comments": "", - "fields": [ - { - "type": "const char *", - "name": "username", - "comments": "" - }, - { - "type": "const char *", - "name": "password", - "comments": "" - } - ], "used": { "returns": [], - "needs": [] + "needs": ["git_credential_ssh_interactive_new"] } } ], [ - "git_cred_userpass_plaintext", + "git_credential_ssh_key", { - "decl": [ - "git_cred parent", - "char * username", - "char * password" - ], + "decl": "git_credential_ssh_key", "type": "struct", - "value": "git_cred_userpass_plaintext", - "file": "transport.h", - "line": 122, - "lineto": 126, - "block": "git_cred parent\nchar * username\nchar * password", + "value": "git_credential_ssh_key", + "file": "git2/credential.h", + "line": 100, + "lineto": 100, "tdef": "typedef", - "description": " A plaintext username and password ", + "description": " A ssh key from disk", "comments": "", - "fields": [ - { - "type": "git_cred", - "name": "parent", - "comments": 
"" - }, - { - "type": "char *", - "name": "username", - "comments": "" - }, - { - "type": "char *", - "name": "password", - "comments": "" - } - ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_credtype_t", + "git_credential_t", { "decl": [ - "GIT_CREDTYPE_USERPASS_PLAINTEXT", - "GIT_CREDTYPE_SSH_KEY", - "GIT_CREDTYPE_SSH_CUSTOM", - "GIT_CREDTYPE_DEFAULT", - "GIT_CREDTYPE_SSH_INTERACTIVE", - "GIT_CREDTYPE_USERNAME", - "GIT_CREDTYPE_SSH_MEMORY" + "GIT_CREDENTIAL_USERPASS_PLAINTEXT", + "GIT_CREDENTIAL_SSH_KEY", + "GIT_CREDENTIAL_SSH_CUSTOM", + "GIT_CREDENTIAL_DEFAULT", + "GIT_CREDENTIAL_SSH_INTERACTIVE", + "GIT_CREDENTIAL_USERNAME", + "GIT_CREDENTIAL_SSH_MEMORY" ], "type": "enum", - "file": "transport.h", - "line": 81, - "lineto": 111, - "block": "GIT_CREDTYPE_USERPASS_PLAINTEXT\nGIT_CREDTYPE_SSH_KEY\nGIT_CREDTYPE_SSH_CUSTOM\nGIT_CREDTYPE_DEFAULT\nGIT_CREDTYPE_SSH_INTERACTIVE\nGIT_CREDTYPE_USERNAME\nGIT_CREDTYPE_SSH_MEMORY", + "file": "git2/credential.h", + "line": 30, + "lineto": 82, + "block": "GIT_CREDENTIAL_USERPASS_PLAINTEXT\nGIT_CREDENTIAL_SSH_KEY\nGIT_CREDENTIAL_SSH_CUSTOM\nGIT_CREDENTIAL_DEFAULT\nGIT_CREDENTIAL_SSH_INTERACTIVE\nGIT_CREDENTIAL_USERNAME\nGIT_CREDENTIAL_SSH_MEMORY", "tdef": "typedef", - "description": " Authentication type requested ", - "comments": "", + "description": " Supported credential types", + "comments": "

This represents the various types of authentication methods supported by the library.

\n", "fields": [ { "type": "int", - "name": "GIT_CREDTYPE_USERPASS_PLAINTEXT", - "comments": "", + "name": "GIT_CREDENTIAL_USERPASS_PLAINTEXT", + "comments": "

A vanilla user/password request

\n", "value": 1 }, { "type": "int", - "name": "GIT_CREDTYPE_SSH_KEY", - "comments": "", + "name": "GIT_CREDENTIAL_SSH_KEY", + "comments": "

An SSH key-based authentication request

\n", "value": 2 }, { "type": "int", - "name": "GIT_CREDTYPE_SSH_CUSTOM", - "comments": "", + "name": "GIT_CREDENTIAL_SSH_CUSTOM", + "comments": "

An SSH key-based authentication request, with a custom signature

\n", "value": 4 }, { "type": "int", - "name": "GIT_CREDTYPE_DEFAULT", - "comments": "", + "name": "GIT_CREDENTIAL_DEFAULT", + "comments": "

An NTLM/Negotiate-based authentication request.

\n", "value": 8 }, { "type": "int", - "name": "GIT_CREDTYPE_SSH_INTERACTIVE", - "comments": "", + "name": "GIT_CREDENTIAL_SSH_INTERACTIVE", + "comments": "

An SSH interactive authentication request

\n", "value": 16 }, { "type": "int", - "name": "GIT_CREDTYPE_USERNAME", - "comments": "

Username-only information

\n\n

If the SSH transport does not know which username to use,\n it will ask via this credential type.

\n", + "name": "GIT_CREDENTIAL_USERNAME", + "comments": "

Username-only authentication request

\n\n

Used as a pre-authentication step if the underlying transport\n (eg. SSH, with no username in its URL) does not know which username\n to use.

\n", "value": 32 }, { "type": "int", - "name": "GIT_CREDTYPE_SSH_MEMORY", - "comments": "

Credentials read from memory.

\n\n

Only available for libssh2+OpenSSL for now.

\n", + "name": "GIT_CREDENTIAL_SSH_MEMORY", + "comments": "

An SSH key-based authentication request

\n\n

Allows credentials to be read from memory instead of files.\n Note that because of differences in crypto backend support, it might\n not be functional.

\n", "value": 64 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_cvar_map", + "git_credential_username", { - "decl": [ - "git_cvar_t cvar_type", - "const char * str_match", - "int map_value" - ], + "decl": "git_credential_username", "type": "struct", - "value": "git_cvar_map", - "file": "config.h", - "line": 93, - "lineto": 97, - "block": "git_cvar_t cvar_type\nconst char * str_match\nint map_value", + "value": "git_credential_username", + "file": "git2/credential.h", + "line": 92, + "lineto": 92, "tdef": "typedef", - "description": " Mapping from config variables to values.", + "description": " Username-only credential information ", "comments": "", - "fields": [ - { - "type": "git_cvar_t", - "name": "cvar_type", - "comments": "" - }, - { - "type": "const char *", - "name": "str_match", - "comments": "" - }, - { - "type": "int", - "name": "map_value", - "comments": "" - } - ], - "used": { - "returns": [], - "needs": [ - "git_config_get_mapped", - "git_config_lookup_map_value" - ] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_cvar_t", + "git_credential_userpass_payload", { - "decl": [ - "GIT_CVAR_FALSE", - "GIT_CVAR_TRUE", - "GIT_CVAR_INT32", - "GIT_CVAR_STRING" - ], - "type": "enum", - "file": "config.h", - "line": 83, - "lineto": 88, - "block": "GIT_CVAR_FALSE\nGIT_CVAR_TRUE\nGIT_CVAR_INT32\nGIT_CVAR_STRING", + "decl": ["const char * username", "const char * password"], + "type": "struct", + "value": "git_credential_userpass_payload", + "file": "git2/credential_helpers.h", + "line": 24, + "lineto": 27, + "block": "const char * username\nconst char * password", "tdef": "typedef", - "description": " Config var type", + "description": " Payload for git_credential_userpass_plaintext.", "comments": "", "fields": [ - { - "type": "int", - "name": "GIT_CVAR_FALSE", - "comments": "", - "value": 0 - }, - { - "type": "int", - "name": "GIT_CVAR_TRUE", - "comments": "", - "value": 1 - }, - { - "type": 
"int", - "name": "GIT_CVAR_INT32", - "comments": "", - "value": 2 - }, - { - "type": "int", - "name": "GIT_CVAR_STRING", - "comments": "", - "value": 3 - } + { "type": "const char *", "name": "username", "comments": "" }, + { "type": "const char *", "name": "password", "comments": "" } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -28591,9 +27255,9 @@ "GIT_DELTA_CONFLICTED" ], "type": "enum", - "file": "diff.h", - "line": 252, - "lineto": 264, + "file": "git2/diff.h", + "line": 224, + "lineto": 236, "block": "GIT_DELTA_UNMODIFIED\nGIT_DELTA_ADDED\nGIT_DELTA_DELETED\nGIT_DELTA_MODIFIED\nGIT_DELTA_RENAMED\nGIT_DELTA_COPIED\nGIT_DELTA_IGNORED\nGIT_DELTA_UNTRACKED\nGIT_DELTA_TYPECHANGE\nGIT_DELTA_UNREADABLE\nGIT_DELTA_CONFLICTED", "tdef": "typedef", "description": " What type of change is described by a git_diff_delta?", @@ -28668,10 +27332,7 @@ ], "used": { "returns": [], - "needs": [ - "git_diff_num_deltas_of_type", - "git_diff_status_char" - ] + "needs": ["git_diff_num_deltas_of_type", "git_diff_status_char"] } } ], @@ -28686,19 +27347,15 @@ ], "type": "struct", "value": "git_describe_format_options", - "file": "describe.h", - "line": 78, - "lineto": 98, + "file": "git2/describe.h", + "line": 100, + "lineto": 120, "block": "unsigned int version\nunsigned int abbreviated_size\nint always_use_long_format\nconst char * dirty_suffix", "tdef": "typedef", - "description": " Options for formatting the describe string", - "comments": "", + "description": " Describe format options structure", + "comments": "

Initialize with GIT_DESCRIBE_FORMAT_OPTIONS_INIT. Alternatively, you can use git_describe_format_options_init.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", "name": "abbreviated_size", @@ -28717,9 +27374,7 @@ ], "used": { "returns": [], - "needs": [ - "git_describe_format" - ] + "needs": ["git_describe_format", "git_describe_format_options_init"] } } ], @@ -28736,19 +27391,15 @@ ], "type": "struct", "value": "git_describe_options", - "file": "describe.h", - "line": 44, - "lineto": 62, + "file": "git2/describe.h", + "line": 47, + "lineto": 65, "block": "unsigned int version\nunsigned int max_candidates_tags\nunsigned int describe_strategy\nconst char * pattern\nint only_follow_first_parent\nint show_commit_oid_as_fallback", "tdef": "typedef", "description": " Describe options structure", - "comments": "

Initialize with GIT_DESCRIBE_OPTIONS_INIT macro to correctly set the version field. E.g.

\n\n
    git_describe_options opts = GIT_DESCRIBE_OPTIONS_INIT;\n
\n", + "comments": "

Initialize with GIT_DESCRIBE_OPTIONS_INIT. Alternatively, you can use git_describe_options_init.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", "name": "max_candidates_tags", @@ -28759,11 +27410,7 @@ "name": "describe_strategy", "comments": " default: GIT_DESCRIBE_DEFAULT " }, - { - "type": "const char *", - "name": "pattern", - "comments": "" - }, + { "type": "const char *", "name": "pattern", "comments": "" }, { "type": "int", "name": "only_follow_first_parent", @@ -28779,6 +27426,7 @@ "returns": [], "needs": [ "git_describe_commit", + "git_describe_options_init", "git_describe_workdir" ] } @@ -28790,9 +27438,9 @@ "decl": "git_describe_result", "type": "struct", "value": "git_describe_result", - "file": "describe.h", - "line": 111, - "lineto": 111, + "file": "git2/describe.h", + "line": 146, + "lineto": 146, "tdef": "typedef", "description": " A struct that stores the result of a describe operation.", "comments": "", @@ -28816,9 +27464,9 @@ "GIT_DESCRIBE_ALL" ], "type": "enum", - "file": "describe.h", - "line": 30, - "lineto": 34, + "file": "git2/describe.h", + "line": 34, + "lineto": 38, "block": "GIT_DESCRIBE_DEFAULT\nGIT_DESCRIBE_TAGS\nGIT_DESCRIBE_ALL", "tdef": "typedef", "description": " Reference lookup strategy", @@ -28843,10 +27491,7 @@ "value": 2 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -28855,12 +27500,12 @@ "decl": "git_diff", "type": "struct", "value": "git_diff", - "file": "diff.h", - "line": 225, - "lineto": 225, + "file": "git2/diff.h", + "line": 196, + "lineto": 196, "tdef": "typedef", "description": " The diff object that contains all individual file deltas.", - "comments": "

This is an opaque structure which will be allocated by one of the diff generator functions below (such as git_diff_tree_to_tree). You are responsible for releasing the object memory when done, using the git_diff_free() function.

\n", + "comments": "

A diff represents the cumulative list of differences between two snapshots of a repository (possibly filtered by a set of file name patterns).

\n\n

Calculating diffs is generally done in two phases: building a list of diffs then traversing it. This makes is easier to share logic across the various types of diffs (tree vs tree, workdir vs index, etc.), and also allows you to insert optional diff post-processing phases, such as rename detection, in between the steps. When you are done with a diff object, it must be freed.

\n\n

This is an opaque structure which will be allocated by one of the diff generator functions below (such as git_diff_tree_to_tree). You are responsible for releasing the object memory when done, using the git_diff_free() function.

\n", "used": { "returns": [ "git_diff_get_delta", @@ -28868,6 +27513,10 @@ "git_pathspec_match_list_diff_entry" ], "needs": [ + "git_apply", + "git_apply_delta_cb", + "git_apply_hunk_cb", + "git_apply_to_tree", "git_checkout_notify_cb", "git_diff_binary_cb", "git_diff_blob_to_buffer", @@ -28875,31 +27524,28 @@ "git_diff_buffers", "git_diff_commit_as_email", "git_diff_file_cb", - "git_diff_find_init_options", + "git_diff_find_options_init", "git_diff_find_similar", "git_diff_foreach", "git_diff_format_email", - "git_diff_format_email_init_options", + "git_diff_format_email_options_init", "git_diff_free", "git_diff_from_buffer", "git_diff_get_delta", - "git_diff_get_perfdata", "git_diff_get_stats", "git_diff_hunk_cb", "git_diff_index_to_index", "git_diff_index_to_workdir", - "git_diff_init_options", "git_diff_is_sorted_icase", "git_diff_line_cb", "git_diff_merge", "git_diff_notify_cb", "git_diff_num_deltas", "git_diff_num_deltas_of_type", + "git_diff_options_init", "git_diff_patchid", - "git_diff_patchid_init_options", + "git_diff_patchid_options_init", "git_diff_print", - "git_diff_print_callback__to_buf", - "git_diff_print_callback__to_file_handle", "git_diff_progress_cb", "git_diff_stats_deletions", "git_diff_stats_files_changed", @@ -28918,8 +27564,7 @@ "git_patch_get_hunk", "git_patch_get_line_in_hunk", "git_patch_print", - "git_pathspec_match_diff", - "git_status_list_get_perfdata" + "git_pathspec_match_diff" ] } } @@ -28934,18 +27579,18 @@ ], "type": "struct", "value": "git_diff_binary", - "file": "diff.h", - "line": 498, - "lineto": 509, + "file": "git2/diff.h", + "line": 553, + "lineto": 565, "block": "unsigned int contains_data\ngit_diff_binary_file old_file\ngit_diff_binary_file new_file", "tdef": "typedef", - "description": " Structure describing the binary contents of a diff. ", - "comments": "", + "description": " Structure describing the binary contents of a diff.", + "comments": "

A binary file / delta is a file (or pair) for which no text diffs should be generated. A diff can contain delta entries that are binary, but no diff content will be output for those files. There is a base heuristic for binary detection and you can further tune the behavior with git attributes or diff flags and option settings.

\n", "fields": [ { "type": "unsigned int", "name": "contains_data", - "comments": " Whether there is data in this binary structure or not. If this\n is `1`, then this was produced and included binary content. If\n this is `0` then this was generated knowing only that a binary\n file changed but without providing the data, probably from a patch\n that said `Binary files a/file.txt and b/file.txt differ`." + "comments": " Whether there is data in this binary structure or not.\n\n If this is `1`, then this was produced and included binary content.\n If this is `0` then this was generated knowing only that a binary\n file changed but without providing the data, probably from a patch\n that said `Binary files a/file.txt and b/file.txt differ`." }, { "type": "git_diff_binary_file", @@ -28981,9 +27626,9 @@ ], "type": "struct", "value": "git_diff_binary_file", - "file": "diff.h", - "line": 483, - "lineto": 495, + "file": "git2/diff.h", + "line": 530, + "lineto": 542, "block": "git_diff_binary_t type\nconst char * data\nsize_t datalen\nsize_t inflatedlen", "tdef": "typedef", "description": " The contents of one of the files in a binary diff. ", @@ -29010,10 +27655,7 @@ "comments": " The length of the binary data after inflation. 
" } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29025,9 +27667,9 @@ "GIT_DIFF_BINARY_DELTA" ], "type": "enum", - "file": "diff.h", - "line": 471, - "lineto": 480, + "file": "git2/diff.h", + "line": 518, + "lineto": 527, "block": "GIT_DIFF_BINARY_NONE\nGIT_DIFF_BINARY_LITERAL\nGIT_DIFF_BINARY_DELTA", "tdef": "typedef", "description": " When producing a binary diff, the binary data returned will be\n either the deflated full (\"literal\") contents of the file, or\n the deflated binary delta between the two sides (whichever is\n smaller).", @@ -29052,10 +27694,7 @@ "value": 2 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29071,19 +27710,15 @@ ], "type": "struct", "value": "git_diff_delta", - "file": "diff.h", - "line": 337, - "lineto": 344, + "file": "git2/diff.h", + "line": 324, + "lineto": 331, "block": "git_delta_t status\nuint32_t flags\nuint16_t similarity\nuint16_t nfiles\ngit_diff_file old_file\ngit_diff_file new_file", "tdef": "typedef", "description": " Description of changes to one entry.", - "comments": "

When iterating over a diff, this will be passed to most callbacks and you can use the contents to understand exactly what has changed.

\n\n

The old_file represents the "from" side of the diff and the new_file represents to "to" side of the diff. What those means depend on the function that was used to generate the diff and will be documented below. You can also use the GIT_DIFF_REVERSE flag to flip it around.

\n\n

Although the two sides of the delta are named "old_file" and "new_file", they actually may correspond to entries that represent a file, a symbolic link, a submodule commit id, or even a tree (if you are tracking type changes or ignored/untracked directories).

\n\n

Under some circumstances, in the name of efficiency, not all fields will be filled in, but we generally try to fill in as much as possible. One example is that the "flags" field may not have either the BINARY or the NOT_BINARY flag set to avoid examining file contents if you do not pass in hunk and/or line callbacks to the diff foreach iteration function. It will just use the git attributes for those files.

\n\n

The similarity score is zero unless you call git_diff_find_similar() which does a similarity analysis of files in the diff. Use that function to do rename and copy detection, and to split heavily modified files in add/delete pairs. After that call, deltas with a status of GIT_DELTA_RENAMED or GIT_DELTA_COPIED will have a similarity score between 0 and 100 indicating how similar the old and new sides are.

\n\n

If you ask git_diff_find_similar to find heavily modified files to break, but to not actually break the records, then GIT_DELTA_MODIFIED records may have a non-zero similarity score if the self-similarity is below the split threshold. To display this value like core Git, invert the score (a la printf("M%03d", 100 - delta->similarity)).

\n", + "comments": "

A delta is a file pair with an old and new revision. The old version may be absent if the file was just created and the new version may be absent if the file was deleted. A diff is mostly just a list of deltas.

\n\n

When iterating over a diff, this will be passed to most callbacks and you can use the contents to understand exactly what has changed.

\n\n

The old_file represents the "from" side of the diff and the new_file represents to "to" side of the diff. What those means depend on the function that was used to generate the diff and will be documented below. You can also use the GIT_DIFF_REVERSE flag to flip it around.

\n\n

Although the two sides of the delta are named "old_file" and "new_file", they actually may correspond to entries that represent a file, a symbolic link, a submodule commit id, or even a tree (if you are tracking type changes or ignored/untracked directories).

\n\n

Under some circumstances, in the name of efficiency, not all fields will be filled in, but we generally try to fill in as much as possible. One example is that the "flags" field may not have either the BINARY or the NOT_BINARY flag set to avoid examining file contents if you do not pass in hunk and/or line callbacks to the diff foreach iteration function. It will just use the git attributes for those files.

\n\n

The similarity score is zero unless you call git_diff_find_similar() which does a similarity analysis of files in the diff. Use that function to do rename and copy detection, and to split heavily modified files in add/delete pairs. After that call, deltas with a status of GIT_DELTA_RENAMED or GIT_DELTA_COPIED will have a similarity score between 0 and 100 indicating how similar the old and new sides are.

\n\n

If you ask git_diff_find_similar to find heavily modified files to break, but to not actually break the records, then GIT_DELTA_MODIFIED records may have a non-zero similarity score if the self-similarity is below the split threshold. To display this value like core Git, invert the score (a la printf("M%03d", 100 - delta->similarity)).

\n", "fields": [ - { - "type": "git_delta_t", - "name": "status", - "comments": "" - }, + { "type": "git_delta_t", "name": "status", "comments": "" }, { "type": "uint32_t", "name": "flags", @@ -29099,16 +27734,8 @@ "name": "nfiles", "comments": " number of files in this delta " }, - { - "type": "git_diff_file", - "name": "old_file", - "comments": "" - }, - { - "type": "git_diff_file", - "name": "new_file", - "comments": "" - } + { "type": "git_diff_file", "name": "old_file", "comments": "" }, + { "type": "git_diff_file", "name": "new_file", "comments": "" } ], "used": { "returns": [ @@ -29117,13 +27744,12 @@ "git_pathspec_match_list_diff_entry" ], "needs": [ + "git_apply_delta_cb", "git_diff_binary_cb", "git_diff_file_cb", "git_diff_hunk_cb", "git_diff_line_cb", - "git_diff_notify_cb", - "git_diff_print_callback__to_buf", - "git_diff_print_callback__to_file_handle" + "git_diff_notify_cb" ] } } @@ -29134,50 +27760,50 @@ "decl": [ "git_oid id", "const char * path", - "git_off_t size", + "git_object_size_t size", "uint32_t flags", "uint16_t mode", "uint16_t id_abbrev" ], "type": "struct", "value": "git_diff_file", - "file": "diff.h", - "line": 292, - "lineto": 299, - "block": "git_oid id\nconst char * path\ngit_off_t size\nuint32_t flags\nuint16_t mode\nuint16_t id_abbrev", + "file": "git2/diff.h", + "line": 245, + "lineto": 282, + "block": "git_oid id\nconst char * path\ngit_object_size_t size\nuint32_t flags\nuint16_t mode\nuint16_t id_abbrev", "tdef": "typedef", "description": " Description of one side of a delta.", - "comments": "

Although this is called a "file", it could represent a file, a symbolic link, a submodule commit id, or even a tree (although that only if you are tracking type changes or ignored/untracked directories).

\n\n

The id is the git_oid of the item. If the entry represents an absent side of a diff (e.g. the old_file of a GIT_DELTA_ADDED delta), then the oid will be zeroes.

\n\n

path is the NUL-terminated path to the entry relative to the working directory of the repository.

\n\n

size is the size of the entry in bytes.

\n\n

flags is a combination of the git_diff_flag_t types

\n\n

mode is, roughly, the stat() st_mode value for the item. This will be restricted to one of the git_filemode_t values.

\n\n

The id_abbrev represents the known length of the id field, when converted to a hex string. It is generally GIT_OID_HEXSZ, unless this delta was created from reading a patch file, in which case it may be abbreviated to something reasonable, like 7 characters.

\n", + "comments": "

Although this is called a "file", it could represent a file, a symbolic link, a submodule commit id, or even a tree (although that only if you are tracking type changes or ignored/untracked directories).

\n", "fields": [ { "type": "git_oid", "name": "id", - "comments": "" + "comments": " The `git_oid` of the item. If the entry represents an\n absent side of a diff (e.g. the `old_file` of a `GIT_DELTA_ADDED` delta),\n then the oid will be zeroes." }, { "type": "const char *", "name": "path", - "comments": "" + "comments": " The NUL-terminated path to the entry relative to the working\n directory of the repository." }, { - "type": "git_off_t", + "type": "git_object_size_t", "name": "size", - "comments": "" + "comments": " The size of the entry in bytes." }, { "type": "uint32_t", "name": "flags", - "comments": "" + "comments": " A combination of the `git_diff_flag_t` types" }, { "type": "uint16_t", "name": "mode", - "comments": "" + "comments": " Roughly, the stat() `st_mode` value for the item. This will\n be restricted to one of the `git_filemode_t` values." }, { "type": "uint16_t", "name": "id_abbrev", - "comments": "" + "comments": " Represents the known length of the `id` field, when\n converted to a hex string. It is generally `GIT_OID_SHA1_HEXSIZE`, unless this\n delta was created from reading a patch file, in which case it may be\n abbreviated to something reasonable, like 7 characters." } ], "used": { @@ -29207,19 +27833,15 @@ ], "type": "struct", "value": "git_diff_find_options", - "file": "diff.h", - "line": 703, - "lineto": 729, + "file": "git2/diff.h", + "line": 774, + "lineto": 828, "block": "unsigned int version\nuint32_t flags\nuint16_t rename_threshold\nuint16_t rename_from_rewrite_threshold\nuint16_t copy_threshold\nuint16_t break_rewrite_threshold\nsize_t rename_limit\ngit_diff_similarity_metric * metric", "tdef": "typedef", "description": " Control behavior of rename and copy detection", - "comments": "

These options mostly mimic parameters that can be passed to git-diff.

\n\n\n\n

The metric option allows you to plug in a custom similarity metric. Set it to NULL for the default internal metric which is based on sampling hashes of ranges of data in the file. The default metric is a pretty good similarity approximation that should work fairly well for both text and binary data, and is pretty fast with fixed memory overhead.

\n", + "comments": "

These options mostly mimic parameters that can be passed to git-diff.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "uint32_t", "name": "flags", @@ -29228,40 +27850,37 @@ { "type": "uint16_t", "name": "rename_threshold", - "comments": " Similarity to consider a file renamed (default 50) " + "comments": " Threshold above which similar files will be considered renames.\n This is equivalent to the -M option. Defaults to 50." }, { "type": "uint16_t", "name": "rename_from_rewrite_threshold", - "comments": " Similarity of modified to be eligible rename source (default 50) " + "comments": " Threshold below which similar files will be eligible to be a rename source.\n This is equivalent to the first part of the -B option. Defaults to 50." }, { "type": "uint16_t", "name": "copy_threshold", - "comments": " Similarity to consider a file a copy (default 50) " + "comments": " Threshold above which similar files will be considered copies.\n This is equivalent to the -C option. Defaults to 50." }, { "type": "uint16_t", "name": "break_rewrite_threshold", - "comments": " Similarity to split modify into delete/add pair (default 60) " + "comments": " Threshold below which similar files will be split into a delete/add pair.\n This is equivalent to the last part of the -B option. Defaults to 60." }, { "type": "size_t", "name": "rename_limit", - "comments": " Maximum similarity sources to examine for a file (somewhat like\n git-diff's `-l` option or `diff.renameLimit` config) (default 200)" + "comments": " Maximum number of matches to consider for a particular file.\n\n This is a little different from the `-l` option from Git because we\n will still process up to this many matches before abandoning the search.\n Defaults to 1000." 
}, { "type": "git_diff_similarity_metric *", "name": "metric", - "comments": " Pluggable similarity metric; pass NULL to use internal metric " + "comments": " The `metric` option allows you to plug in a custom similarity metric.\n\n Set it to NULL to use the default internal metric.\n\n The default metric is based on sampling hashes of ranges of data in\n the file, which is a pretty good similarity approximation that should\n work fairly well for both text and binary data while still being\n pretty fast with a fixed memory overhead." } ], "used": { "returns": [], - "needs": [ - "git_diff_find_init_options", - "git_diff_find_similar" - ] + "needs": ["git_diff_find_options_init", "git_diff_find_similar"] } } ], @@ -29287,9 +27906,9 @@ "GIT_DIFF_FIND_REMOVE_UNMODIFIED" ], "type": "enum", - "file": "diff.h", - "line": 597, - "lineto": 666, + "file": "git2/diff.h", + "line": 683, + "lineto": 752, "block": "GIT_DIFF_FIND_BY_CONFIG\nGIT_DIFF_FIND_RENAMES\nGIT_DIFF_FIND_RENAMES_FROM_REWRITES\nGIT_DIFF_FIND_COPIES\nGIT_DIFF_FIND_COPIES_FROM_UNMODIFIED\nGIT_DIFF_FIND_REWRITES\nGIT_DIFF_BREAK_REWRITES\nGIT_DIFF_FIND_AND_BREAK_REWRITES\nGIT_DIFF_FIND_FOR_UNTRACKED\nGIT_DIFF_FIND_ALL\nGIT_DIFF_FIND_IGNORE_LEADING_WHITESPACE\nGIT_DIFF_FIND_IGNORE_WHITESPACE\nGIT_DIFF_FIND_DONT_IGNORE_WHITESPACE\nGIT_DIFF_FIND_EXACT_MATCH_ONLY\nGIT_DIFF_BREAK_REWRITES_FOR_RENAMES_ONLY\nGIT_DIFF_FIND_REMOVE_UNMODIFIED", "tdef": "typedef", "description": " Flags to control the behavior of diff rename/copy detection.", @@ -29392,10 +28011,7 @@ "value": 65536 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29405,13 +28021,14 @@ "GIT_DIFF_FLAG_BINARY", "GIT_DIFF_FLAG_NOT_BINARY", "GIT_DIFF_FLAG_VALID_ID", - "GIT_DIFF_FLAG_EXISTS" + "GIT_DIFF_FLAG_EXISTS", + "GIT_DIFF_FLAG_VALID_SIZE" ], "type": "enum", - "file": "diff.h", - "line": 235, - "lineto": 240, - "block": 
"GIT_DIFF_FLAG_BINARY\nGIT_DIFF_FLAG_NOT_BINARY\nGIT_DIFF_FLAG_VALID_ID\nGIT_DIFF_FLAG_EXISTS", + "file": "git2/diff.h", + "line": 206, + "lineto": 212, + "block": "GIT_DIFF_FLAG_BINARY\nGIT_DIFF_FLAG_NOT_BINARY\nGIT_DIFF_FLAG_VALID_ID\nGIT_DIFF_FLAG_EXISTS\nGIT_DIFF_FLAG_VALID_SIZE", "tdef": "typedef", "description": " Flags for the delta object and the file objects on each side.", "comments": "

These flags are used for both the flags value of the git_diff_delta and the flags for the git_diff_file objects representing the old and new sides of the delta. Values outside of this public range should be considered reserved for internal or future use.

\n", @@ -29439,12 +28056,15 @@ "name": "GIT_DIFF_FLAG_EXISTS", "comments": "

file exists at this side of the delta

\n", "value": 8 + }, + { + "type": "int", + "name": "GIT_DIFF_FLAG_VALID_SIZE", + "comments": "

file size value is known correct

\n", + "value": 16 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29455,9 +28075,9 @@ "GIT_DIFF_FORMAT_EMAIL_EXCLUDE_SUBJECT_PATCH_MARKER" ], "type": "enum", - "file": "diff.h", - "line": 1321, - "lineto": 1328, + "file": "git2/deprecated.h", + "line": 325, + "lineto": 331, "block": "GIT_DIFF_FORMAT_EMAIL_NONE\nGIT_DIFF_FORMAT_EMAIL_EXCLUDE_SUBJECT_PATCH_MARKER", "tdef": "typedef", "description": " Formatting options for diff e-mail generation", @@ -29476,12 +28096,7 @@ "value": 1 } ], - "used": { - "returns": [], - "needs": [ - "git_diff_commit_as_email" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29489,7 +28104,7 @@ { "decl": [ "unsigned int version", - "git_diff_format_email_flags_t flags", + "uint32_t flags", "size_t patch_no", "size_t total_patches", "const git_oid * id", @@ -29499,23 +28114,19 @@ ], "type": "struct", "value": "git_diff_format_email_options", - "file": "diff.h", - "line": 1333, - "lineto": 1355, - "block": "unsigned int version\ngit_diff_format_email_flags_t flags\nsize_t patch_no\nsize_t total_patches\nconst git_oid * id\nconst char * summary\nconst char * body\nconst git_signature * author", + "file": "git2/deprecated.h", + "line": 338, + "lineto": 361, + "block": "unsigned int version\nuint32_t flags\nsize_t patch_no\nsize_t total_patches\nconst git_oid * id\nconst char * summary\nconst char * body\nconst git_signature * author", "tdef": "typedef", "description": " Options for controlling the formatting of the generated e-mail.", "comments": "", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { - "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "git_diff_format_email_flags_t", + "type": "uint32_t", "name": "flags", - "comments": "" + "comments": " see `git_diff_format_email_flags_t` above " }, { "type": "size_t", @@ -29552,7 +28163,7 @@ "returns": [], "needs": [ "git_diff_format_email", - 
"git_diff_format_email_init_options" + "git_diff_format_email_options_init" ] } } @@ -29565,13 +28176,14 @@ "GIT_DIFF_FORMAT_PATCH_HEADER", "GIT_DIFF_FORMAT_RAW", "GIT_DIFF_FORMAT_NAME_ONLY", - "GIT_DIFF_FORMAT_NAME_STATUS" + "GIT_DIFF_FORMAT_NAME_STATUS", + "GIT_DIFF_FORMAT_PATCH_ID" ], "type": "enum", - "file": "diff.h", - "line": 1045, - "lineto": 1051, - "block": "GIT_DIFF_FORMAT_PATCH\nGIT_DIFF_FORMAT_PATCH_HEADER\nGIT_DIFF_FORMAT_RAW\nGIT_DIFF_FORMAT_NAME_ONLY\nGIT_DIFF_FORMAT_NAME_STATUS", + "file": "git2/diff.h", + "line": 1156, + "lineto": 1163, + "block": "GIT_DIFF_FORMAT_PATCH\nGIT_DIFF_FORMAT_PATCH_HEADER\nGIT_DIFF_FORMAT_RAW\nGIT_DIFF_FORMAT_NAME_ONLY\nGIT_DIFF_FORMAT_NAME_STATUS\nGIT_DIFF_FORMAT_PATCH_ID", "tdef": "typedef", "description": " Possible output formats for diff data", "comments": "", @@ -29605,14 +28217,17 @@ "name": "GIT_DIFF_FORMAT_NAME_STATUS", "comments": "

like git diff --name-status

\n", "value": 5 + }, + { + "type": "int", + "name": "GIT_DIFF_FORMAT_PATCH_ID", + "comments": "

git diff as used by git patch-id

\n", + "value": 6 } ], "used": { "returns": [], - "needs": [ - "git_diff_print", - "git_diff_to_buf" - ] + "needs": ["git_diff_print", "git_diff_to_buf"] } } ], @@ -29629,13 +28244,13 @@ ], "type": "struct", "value": "git_diff_hunk", - "file": "diff.h", - "line": 523, - "lineto": 530, + "file": "git2/diff.h", + "line": 590, + "lineto": 597, "block": "int old_start\nint old_lines\nint new_start\nint new_lines\nsize_t header_len\nchar [128] header", "tdef": "typedef", "description": " Structure describing a hunk of a diff.", - "comments": "", + "comments": "

A hunk is a span of modified lines in a delta along with some stable surrounding context. You can configure the amount of context and other properties of how hunks are generated. Each hunk also comes with a header that described where it starts and ends in both the old and new versions in the delta.

\n", "fields": [ { "type": "int", @@ -29671,14 +28286,13 @@ "used": { "returns": [], "needs": [ + "git_apply_hunk_cb", "git_diff_blob_to_buffer", "git_diff_blobs", "git_diff_buffers", "git_diff_foreach", "git_diff_hunk_cb", "git_diff_line_cb", - "git_diff_print_callback__to_buf", - "git_diff_print_callback__to_file_handle", "git_patch_get_hunk" ] } @@ -29698,13 +28312,13 @@ ], "type": "struct", "value": "git_diff_line", - "file": "diff.h", - "line": 570, - "lineto": 578, + "file": "git2/diff.h", + "line": 650, + "lineto": 658, "block": "char origin\nint old_lineno\nint new_lineno\nint num_lines\nsize_t content_len\ngit_off_t content_offset\nconst char * content", "tdef": "typedef", "description": " Structure describing a line (or data span) of a diff.", - "comments": "", + "comments": "

A line is a range of characters inside a hunk. It could be a context line (i.e. in both old and new versions), an added line (i.e. only in the new version), or a removed line (i.e. only in the old version). Unfortunately, we don't know anything about the encoding of data in the file being diffed, so we cannot tell you much about the line content. Line data will not be NUL-byte terminated, however, because it will be just a span of bytes inside the larger file.

\n", "fields": [ { "type": "char", @@ -29751,8 +28365,6 @@ "git_diff_foreach", "git_diff_line_cb", "git_diff_print", - "git_diff_print_callback__to_buf", - "git_diff_print_callback__to_file_handle", "git_patch_get_line_in_hunk", "git_patch_print" ] @@ -29774,9 +28386,9 @@ "GIT_DIFF_LINE_BINARY" ], "type": "enum", - "file": "diff.h", - "line": 549, - "lineto": 565, + "file": "git2/diff.h", + "line": 621, + "lineto": 637, "block": "GIT_DIFF_LINE_CONTEXT\nGIT_DIFF_LINE_ADDITION\nGIT_DIFF_LINE_DELETION\nGIT_DIFF_LINE_CONTEXT_EOFNL\nGIT_DIFF_LINE_ADD_EOFNL\nGIT_DIFF_LINE_DEL_EOFNL\nGIT_DIFF_LINE_FILE_HDR\nGIT_DIFF_LINE_HUNK_HDR\nGIT_DIFF_LINE_BINARY", "tdef": "typedef", "description": " Line origin constants.", @@ -29837,10 +28449,7 @@ "value": 66 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -29866,6 +28475,8 @@ "GIT_DIFF_UPDATE_INDEX", "GIT_DIFF_INCLUDE_UNREADABLE", "GIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED", + "GIT_DIFF_INDENT_HEURISTIC", + "GIT_DIFF_IGNORE_BLANK_LINES", "GIT_DIFF_FORCE_TEXT", "GIT_DIFF_FORCE_BINARY", "GIT_DIFF_IGNORE_WHITESPACE", @@ -29875,14 +28486,13 @@ "GIT_DIFF_SHOW_UNMODIFIED", "GIT_DIFF_PATIENCE", "GIT_DIFF_MINIMAL", - "GIT_DIFF_SHOW_BINARY", - "GIT_DIFF_INDENT_HEURISTIC" + "GIT_DIFF_SHOW_BINARY" ], "type": "enum", - "file": "diff.h", - "line": 72, - "lineto": 215, - "block": 
"GIT_DIFF_NORMAL\nGIT_DIFF_REVERSE\nGIT_DIFF_INCLUDE_IGNORED\nGIT_DIFF_RECURSE_IGNORED_DIRS\nGIT_DIFF_INCLUDE_UNTRACKED\nGIT_DIFF_RECURSE_UNTRACKED_DIRS\nGIT_DIFF_INCLUDE_UNMODIFIED\nGIT_DIFF_INCLUDE_TYPECHANGE\nGIT_DIFF_INCLUDE_TYPECHANGE_TREES\nGIT_DIFF_IGNORE_FILEMODE\nGIT_DIFF_IGNORE_SUBMODULES\nGIT_DIFF_IGNORE_CASE\nGIT_DIFF_INCLUDE_CASECHANGE\nGIT_DIFF_DISABLE_PATHSPEC_MATCH\nGIT_DIFF_SKIP_BINARY_CHECK\nGIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS\nGIT_DIFF_UPDATE_INDEX\nGIT_DIFF_INCLUDE_UNREADABLE\nGIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED\nGIT_DIFF_FORCE_TEXT\nGIT_DIFF_FORCE_BINARY\nGIT_DIFF_IGNORE_WHITESPACE\nGIT_DIFF_IGNORE_WHITESPACE_CHANGE\nGIT_DIFF_IGNORE_WHITESPACE_EOL\nGIT_DIFF_SHOW_UNTRACKED_CONTENT\nGIT_DIFF_SHOW_UNMODIFIED\nGIT_DIFF_PATIENCE\nGIT_DIFF_MINIMAL\nGIT_DIFF_SHOW_BINARY\nGIT_DIFF_INDENT_HEURISTIC", + "file": "git2/diff.h", + "line": 28, + "lineto": 174, + "block": "GIT_DIFF_NORMAL\nGIT_DIFF_REVERSE\nGIT_DIFF_INCLUDE_IGNORED\nGIT_DIFF_RECURSE_IGNORED_DIRS\nGIT_DIFF_INCLUDE_UNTRACKED\nGIT_DIFF_RECURSE_UNTRACKED_DIRS\nGIT_DIFF_INCLUDE_UNMODIFIED\nGIT_DIFF_INCLUDE_TYPECHANGE\nGIT_DIFF_INCLUDE_TYPECHANGE_TREES\nGIT_DIFF_IGNORE_FILEMODE\nGIT_DIFF_IGNORE_SUBMODULES\nGIT_DIFF_IGNORE_CASE\nGIT_DIFF_INCLUDE_CASECHANGE\nGIT_DIFF_DISABLE_PATHSPEC_MATCH\nGIT_DIFF_SKIP_BINARY_CHECK\nGIT_DIFF_ENABLE_FAST_UNTRACKED_DIRS\nGIT_DIFF_UPDATE_INDEX\nGIT_DIFF_INCLUDE_UNREADABLE\nGIT_DIFF_INCLUDE_UNREADABLE_AS_UNTRACKED\nGIT_DIFF_INDENT_HEURISTIC\nGIT_DIFF_IGNORE_BLANK_LINES\nGIT_DIFF_FORCE_TEXT\nGIT_DIFF_FORCE_BINARY\nGIT_DIFF_IGNORE_WHITESPACE\nGIT_DIFF_IGNORE_WHITESPACE_CHANGE\nGIT_DIFF_IGNORE_WHITESPACE_EOL\nGIT_DIFF_SHOW_UNTRACKED_CONTENT\nGIT_DIFF_SHOW_UNMODIFIED\nGIT_DIFF_PATIENCE\nGIT_DIFF_MINIMAL\nGIT_DIFF_SHOW_BINARY", "tdef": "typedef", "description": " Flags for diff options. A combination of these flags can be passed\n in via the `flags` value in the `git_diff_options`.", "comments": "", @@ -30001,6 +28611,18 @@ "comments": "

Include unreadable files in the diff

\n", "value": 131072 }, + { + "type": "int", + "name": "GIT_DIFF_INDENT_HEURISTIC", + "comments": "

Use a heuristic that takes indentation and whitespace into account\n which generally can produce better diffs when dealing with ambiguous\n diff hunks.

\n", + "value": 262144 + }, + { + "type": "int", + "name": "GIT_DIFF_IGNORE_BLANK_LINES", + "comments": "

Ignore blank lines

\n", + "value": 524288 + }, { "type": "int", "name": "GIT_DIFF_FORCE_TEXT", @@ -30060,18 +28682,9 @@ "name": "GIT_DIFF_SHOW_BINARY", "comments": "

Include the necessary deflate / delta information so that git-apply\n can apply given diff information to binary files.

\n", "value": 1073741824 - }, - { - "type": "unsigned int", - "name": "GIT_DIFF_INDENT_HEURISTIC", - "comments": "

Use a heuristic that takes indentation and whitespace into account\n which generally can produce better diffs when dealing with ambiguous\n diff hunks.

\n", - "value": -2147483648 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -30087,6 +28700,7 @@ "void * payload", "uint32_t context_lines", "uint32_t interhunk_lines", + "git_oid_t oid_type", "uint16_t id_abbrev", "git_off_t max_size", "const char * old_prefix", @@ -30094,13 +28708,13 @@ ], "type": "struct", "value": "git_diff_options", - "file": "diff.h", - "line": 408, - "lineto": 428, - "block": "unsigned int version\nuint32_t flags\ngit_submodule_ignore_t ignore_submodules\ngit_strarray pathspec\ngit_diff_notify_cb notify_cb\ngit_diff_progress_cb progress_cb\nvoid * payload\nuint32_t context_lines\nuint32_t interhunk_lines\nuint16_t id_abbrev\ngit_off_t max_size\nconst char * old_prefix\nconst char * new_prefix", + "file": "git2/diff.h", + "line": 383, + "lineto": 471, + "block": "unsigned int version\nuint32_t flags\ngit_submodule_ignore_t ignore_submodules\ngit_strarray pathspec\ngit_diff_notify_cb notify_cb\ngit_diff_progress_cb progress_cb\nvoid * payload\nuint32_t context_lines\nuint32_t interhunk_lines\ngit_oid_t oid_type\nuint16_t id_abbrev\ngit_off_t max_size\nconst char * old_prefix\nconst char * new_prefix", "tdef": "typedef", "description": " Structure describing options about how the diff should be executed.", - "comments": "

Setting all values of the structure to zero will yield the default values. Similarly, passing NULL for the options structure will give the defaults. The default values are marked below.

\n\n\n", + "comments": "

Setting all values of the structure to zero will yield the default values. Similarly, passing NULL for the options structure will give the defaults. The default values are marked below.

\n", "fields": [ { "type": "unsigned int", @@ -30110,62 +28724,67 @@ { "type": "uint32_t", "name": "flags", - "comments": " defaults to GIT_DIFF_NORMAL " + "comments": " A combination of `git_diff_option_t` values above.\n Defaults to GIT_DIFF_NORMAL" }, { "type": "git_submodule_ignore_t", "name": "ignore_submodules", - "comments": " submodule ignore rule " + "comments": " Overrides the submodule ignore setting for all submodules in the diff. " }, { "type": "git_strarray", "name": "pathspec", - "comments": " defaults to include all paths " + "comments": " An array of paths / fnmatch patterns to constrain diff.\n All paths are included by default." }, { "type": "git_diff_notify_cb", "name": "notify_cb", - "comments": "" + "comments": " An optional callback function, notifying the consumer of changes to\n the diff as new deltas are added." }, { "type": "git_diff_progress_cb", "name": "progress_cb", - "comments": "" + "comments": " An optional callback function, notifying the consumer of which files\n are being examined as the diff is generated." }, { "type": "void *", "name": "payload", - "comments": "" + "comments": " The payload to pass to the callback functions. " }, { "type": "uint32_t", "name": "context_lines", - "comments": " defaults to 3 " + "comments": " The number of unchanged lines that define the boundary of a hunk\n (and to display before and after). Defaults to 3." }, { "type": "uint32_t", "name": "interhunk_lines", - "comments": " defaults to 0 " + "comments": " The maximum number of unchanged lines between hunk boundaries before\n the hunks will be merged into one. Defaults to 0." + }, + { + "type": "git_oid_t", + "name": "oid_type", + "comments": " The object ID type to emit in diffs; this is used by functions\n that operate without a repository - namely `git_diff_buffers`,\n or `git_diff_blobs` and `git_diff_blob_to_buffer` when one blob\n is `NULL`.\n\n This may be omitted (set to `0`). 
If a repository is available,\n the object ID format of the repository will be used. If no\n repository is available then the default is `GIT_OID_SHA`.\n\n If this is specified and a repository is available, then the\n specified `oid_type` must match the repository's object ID\n format." }, { "type": "uint16_t", "name": "id_abbrev", - "comments": " default 'core.abbrev' or 7 if unset " + "comments": " The abbreviation length to use when formatting object ids.\n Defaults to the value of 'core.abbrev' from the config, or 7 if unset." }, { "type": "git_off_t", "name": "max_size", - "comments": " defaults to 512MB " + "comments": " A size (in bytes) above which a blob will be marked as binary\n automatically; pass a negative value to disable.\n Defaults to 512MB." }, { "type": "const char *", "name": "old_prefix", - "comments": " defaults to \"a\" " + "comments": " The virtual \"directory\" prefix for old file names in hunk headers.\n Default is \"a\"." }, { "type": "const char *", "name": "new_prefix", - "comments": " defaults to \"b\" " + "comments": " The virtual \"directory\" prefix for new file names in hunk headers.\n Defaults to \"b\"." } ], "used": { @@ -30177,7 +28796,7 @@ "git_diff_commit_as_email", "git_diff_index_to_index", "git_diff_index_to_workdir", - "git_diff_init_options", + "git_diff_options_init", "git_diff_tree_to_index", "git_diff_tree_to_tree", "git_diff_tree_to_workdir", @@ -30190,76 +28809,44 @@ } ], [ - "git_diff_patchid_options", + "git_diff_parse_options", { - "decl": [ - "unsigned int version" - ], + "decl": ["unsigned int version", "git_oid_t oid_type"], "type": "struct", - "value": "git_diff_patchid_options", - "file": "diff.h", - "line": 1415, - "lineto": 1417, - "block": "unsigned int version", + "value": "git_diff_parse_options", + "file": "git2/diff.h", + "line": 1322, + "lineto": 1325, + "block": "unsigned int version\ngit_oid_t oid_type", "tdef": "typedef", - "description": " Patch ID options structure", - "comments": "

Initialize with GIT_DIFF_PATCHID_OPTIONS_INIT macro to correctly set the default values and version.

\n", + "description": " Options for parsing a diff / patch file.", + "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - } + { "type": "unsigned int", "name": "version", "comments": "" }, + { "type": "git_oid_t", "name": "oid_type", "comments": "" } ], - "used": { - "returns": [], - "needs": [ - "git_diff_patchid", - "git_diff_patchid_init_options" - ] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_diff_perfdata", + "git_diff_patchid_options", { - "decl": [ - "unsigned int version", - "size_t stat_calls", - "size_t oid_calculations" - ], + "decl": ["unsigned int version"], "type": "struct", - "value": "git_diff_perfdata", - "file": "sys/diff.h", - "line": 67, - "lineto": 71, - "block": "unsigned int version\nsize_t stat_calls\nsize_t oid_calculations", + "value": "git_diff_patchid_options", + "file": "git2/diff.h", + "line": 1459, + "lineto": 1461, + "block": "unsigned int version", "tdef": "typedef", - "description": " Performance data from diffing", - "comments": "", + "description": " Patch ID options structure", + "comments": "

Initialize with GIT_PATCHID_OPTIONS_INIT. Alternatively, you can use git_diff_patchid_options_init.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "size_t", - "name": "stat_calls", - "comments": " Number of stat() calls performed " - }, - { - "type": "size_t", - "name": "oid_calculations", - "comments": " Number of ID calculations " - } + { "type": "unsigned int", "name": "version", "comments": "" } ], "used": { "returns": [], - "needs": [ - "git_diff_get_perfdata", - "git_status_list_get_perfdata" - ] + "needs": ["git_diff_patchid", "git_diff_patchid_options_init"] } } ], @@ -30275,9 +28862,9 @@ ], "type": "struct", "value": "git_diff_similarity_metric", - "file": "diff.h", - "line": 671, - "lineto": 681, + "file": "git2/diff.h", + "line": 757, + "lineto": 767, "block": "int (*)(void **, const git_diff_file *, const char *, void *) file_signature\nint (*)(void **, const git_diff_file *, const char *, size_t, void *) buffer_signature\nvoid (*)(void *, void *) free_signature\nint (*)(int *, void *, void *, void *) similarity\nvoid * payload", "tdef": "typedef", "description": " Pluggable similarity metric", @@ -30303,16 +28890,9 @@ "name": "similarity", "comments": "" }, - { - "type": "void *", - "name": "payload", - "comments": "" - } + { "type": "void *", "name": "payload", "comments": "" } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -30321,9 +28901,9 @@ "decl": "git_diff_stats", "type": "struct", "value": "git_diff_stats", - "file": "diff.h", - "line": 1235, - "lineto": 1235, + "file": "git2/diff.h", + "line": 1369, + "lineto": 1369, "tdef": "typedef", "description": " This is an opaque structure which is allocated by `git_diff_get_stats`.\n You are responsible for releasing the object memory when done, using the\n `git_diff_stats_free()` function.", "comments": "", @@ -30351,9 +28931,9 @@ "GIT_DIFF_STATS_INCLUDE_SUMMARY" ], "type": "enum", - "file": "diff.h", - "line": 1240, - "lineto": 1255, + "file": "git2/diff.h", + "line": 1374, + "lineto": 
1389, "block": "GIT_DIFF_STATS_NONE\nGIT_DIFF_STATS_FULL\nGIT_DIFF_STATS_SHORT\nGIT_DIFF_STATS_NUMBER\nGIT_DIFF_STATS_INCLUDE_SUMMARY", "tdef": "typedef", "description": " Formatting options for diff stats", @@ -30390,25 +28970,17 @@ "value": 8 } ], - "used": { - "returns": [], - "needs": [ - "git_diff_stats_to_buf" - ] - } + "used": { "returns": [], "needs": ["git_diff_stats_to_buf"] } } ], [ "git_direction", { - "decl": [ - "GIT_DIRECTION_FETCH", - "GIT_DIRECTION_PUSH" - ], + "decl": ["GIT_DIRECTION_FETCH", "GIT_DIRECTION_PUSH"], "type": "enum", - "file": "net.h", - "line": 31, - "lineto": 34, + "file": "git2/net.h", + "line": 32, + "lineto": 35, "block": "GIT_DIRECTION_FETCH\nGIT_DIRECTION_PUSH", "tdef": "typedef", "description": " Direction of the connection.", @@ -30428,27 +29000,123 @@ } ], "used": { - "returns": [ - "git_refspec_direction" - ], - "needs": [ - "git_remote_connect" - ] + "returns": ["git_refspec_direction"], + "needs": ["git_remote_connect", "git_remote_connect_ext"] } } ], [ - "git_error", + "git_email_create_flags_t", + { + "decl": [ + "GIT_EMAIL_CREATE_DEFAULT", + "GIT_EMAIL_CREATE_OMIT_NUMBERS", + "GIT_EMAIL_CREATE_ALWAYS_NUMBER", + "GIT_EMAIL_CREATE_NO_RENAMES" + ], + "type": "enum", + "file": "git2/email.h", + "line": 24, + "lineto": 39, + "block": "GIT_EMAIL_CREATE_DEFAULT\nGIT_EMAIL_CREATE_OMIT_NUMBERS\nGIT_EMAIL_CREATE_ALWAYS_NUMBER\nGIT_EMAIL_CREATE_NO_RENAMES", + "tdef": "typedef", + "description": " Formatting options for diff e-mail generation", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_EMAIL_CREATE_DEFAULT", + "comments": "

Normal patch, the default

\n", + "value": 0 + }, + { + "type": "int", + "name": "GIT_EMAIL_CREATE_OMIT_NUMBERS", + "comments": "

Do not include patch numbers in the subject prefix.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_EMAIL_CREATE_ALWAYS_NUMBER", + "comments": "

Include numbers in the subject prefix even when the\n patch is for a single commit (1/1).

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_EMAIL_CREATE_NO_RENAMES", + "comments": "

Do not perform rename or similarity detection.

\n", + "value": 4 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_email_create_options", { "decl": [ - "char * message", - "int klass" + "unsigned int version", + "uint32_t flags", + "git_diff_options diff_opts", + "git_diff_find_options diff_find_opts", + "const char * subject_prefix", + "size_t start_number", + "size_t reroll_number" + ], + "type": "struct", + "value": "git_email_create_options", + "file": "git2/email.h", + "line": 44, + "lineto": 72, + "block": "unsigned int version\nuint32_t flags\ngit_diff_options diff_opts\ngit_diff_find_options diff_find_opts\nconst char * subject_prefix\nsize_t start_number\nsize_t reroll_number", + "tdef": "typedef", + "description": " Options for controlling the formatting of the generated e-mail.", + "comments": "", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "uint32_t", + "name": "flags", + "comments": " see `git_email_create_flags_t` above " + }, + { + "type": "git_diff_options", + "name": "diff_opts", + "comments": " Options to use when creating diffs " + }, + { + "type": "git_diff_find_options", + "name": "diff_find_opts", + "comments": " Options for finding similarities within diffs " + }, + { + "type": "const char *", + "name": "subject_prefix", + "comments": " The subject prefix, by default \"PATCH\". If set to an empty\n string (\"\") then only the patch numbers will be shown in the\n prefix. If the subject_prefix is empty and patch numbers\n are not being shown, the prefix will be omitted entirely." + }, + { + "type": "size_t", + "name": "start_number", + "comments": " The starting patch number; this cannot be 0. By default,\n this is 1." + }, + { + "type": "size_t", + "name": "reroll_number", + "comments": " The \"re-roll\" number. By default, there is no re-roll. 
" + } ], + "used": { "returns": [], "needs": ["git_email_create_from_commit"] } + } + ], + [ + "git_error", + { + "decl": ["char * message", "int klass"], "type": "struct", "value": "git_error", - "file": "errors.h", - "line": 66, - "lineto": 69, + "file": "git2/errors.h", + "line": 125, + "lineto": 128, "block": "char * message\nint klass", "tdef": "typedef", "description": " Structure to store extra details of the last error that occurred.", @@ -30457,20 +29125,15 @@ { "type": "char *", "name": "message", - "comments": "" + "comments": " The error message for the last error. " }, { "type": "int", "name": "klass", - "comments": "" + "comments": " The category of the last error. \n\n git_error_t " } ], - "used": { - "returns": [ - "giterr_last" - ], - "needs": [] - } + "used": { "returns": ["git_error_last", "giterr_last"], "needs": [] } } ], [ @@ -30504,13 +29167,20 @@ "GIT_PASSTHROUGH", "GIT_ITEROVER", "GIT_RETRY", - "GIT_EMISMATCH" + "GIT_EMISMATCH", + "GIT_EINDEXDIRTY", + "GIT_EAPPLYFAIL", + "GIT_EOWNER", + "GIT_TIMEOUT", + "GIT_EUNCHANGED", + "GIT_ENOTSUPPORTED", + "GIT_EREADONLY" ], "type": "enum", - "file": "errors.h", + "file": "git2/errors.h", "line": 21, - "lineto": 58, - "block": "GIT_OK\nGIT_ERROR\nGIT_ENOTFOUND\nGIT_EEXISTS\nGIT_EAMBIGUOUS\nGIT_EBUFS\nGIT_EUSER\nGIT_EBAREREPO\nGIT_EUNBORNBRANCH\nGIT_EUNMERGED\nGIT_ENONFASTFORWARD\nGIT_EINVALIDSPEC\nGIT_ECONFLICT\nGIT_ELOCKED\nGIT_EMODIFIED\nGIT_EAUTH\nGIT_ECERTIFICATE\nGIT_EAPPLIED\nGIT_EPEEL\nGIT_EEOF\nGIT_EINVALID\nGIT_EUNCOMMITTED\nGIT_EDIRECTORY\nGIT_EMERGECONFLICT\nGIT_PASSTHROUGH\nGIT_ITEROVER\nGIT_RETRY\nGIT_EMISMATCH", + "lineto": 73, + "block": 
"GIT_OK\nGIT_ERROR\nGIT_ENOTFOUND\nGIT_EEXISTS\nGIT_EAMBIGUOUS\nGIT_EBUFS\nGIT_EUSER\nGIT_EBAREREPO\nGIT_EUNBORNBRANCH\nGIT_EUNMERGED\nGIT_ENONFASTFORWARD\nGIT_EINVALIDSPEC\nGIT_ECONFLICT\nGIT_ELOCKED\nGIT_EMODIFIED\nGIT_EAUTH\nGIT_ECERTIFICATE\nGIT_EAPPLIED\nGIT_EPEEL\nGIT_EEOF\nGIT_EINVALID\nGIT_EUNCOMMITTED\nGIT_EDIRECTORY\nGIT_EMERGECONFLICT\nGIT_PASSTHROUGH\nGIT_ITEROVER\nGIT_RETRY\nGIT_EMISMATCH\nGIT_EINDEXDIRTY\nGIT_EAPPLYFAIL\nGIT_EOWNER\nGIT_TIMEOUT\nGIT_EUNCHANGED\nGIT_ENOTSUPPORTED\nGIT_EREADONLY", "tdef": "typedef", "description": " Generic return codes ", "comments": "", @@ -30518,55 +29188,55 @@ { "type": "int", "name": "GIT_OK", - "comments": "

No error

\n", + "comments": "

No error occurred; the call was successful.

\n", "value": 0 }, { "type": "int", "name": "GIT_ERROR", - "comments": "

Generic error

\n", + "comments": "

An error occurred; call git_error_last for more information.

\n", "value": -1 }, { "type": "int", "name": "GIT_ENOTFOUND", - "comments": "

Requested object could not be found

\n", + "comments": "

Requested object could not be found.

\n", "value": -3 }, { "type": "int", "name": "GIT_EEXISTS", - "comments": "

Object exists preventing operation

\n", + "comments": "

Object exists preventing operation.

\n", "value": -4 }, { "type": "int", "name": "GIT_EAMBIGUOUS", - "comments": "

More than one object matches

\n", + "comments": "

More than one object matches.

\n", "value": -5 }, { "type": "int", "name": "GIT_EBUFS", - "comments": "

Output buffer too short to hold data

\n", + "comments": "

Output buffer too short to hold data.

\n", "value": -6 }, { "type": "int", "name": "GIT_EUSER", - "comments": "", + "comments": "

GIT_EUSER is a special error that is never generated by libgit2\n code. You can return it from a callback (e.g to stop an iteration)\n to know that it was generated by the callback and not by libgit2.

\n", "value": -7 }, { "type": "int", "name": "GIT_EBAREREPO", - "comments": "

Operation not allowed on bare repository

\n", + "comments": "

Operation not allowed on bare repository.

\n", "value": -8 }, { "type": "int", "name": "GIT_EUNBORNBRANCH", - "comments": "

HEAD refers to branch with no commits

\n", + "comments": "

HEAD refers to branch with no commits.

\n", "value": -9 }, { @@ -30662,7 +29332,7 @@ { "type": "int", "name": "GIT_PASSTHROUGH", - "comments": "

Internal only

\n", + "comments": "

A user-configured callback refused to act

\n", "value": -30 }, { @@ -30682,271 +29352,323 @@ "name": "GIT_EMISMATCH", "comments": "

Hashsum mismatch in object

\n", "value": -33 + }, + { + "type": "int", + "name": "GIT_EINDEXDIRTY", + "comments": "

Unsaved changes in the index would be overwritten

\n", + "value": -34 + }, + { + "type": "int", + "name": "GIT_EAPPLYFAIL", + "comments": "

Patch application failed

\n", + "value": -35 + }, + { + "type": "int", + "name": "GIT_EOWNER", + "comments": "

The object is not owned by the current user

\n", + "value": -36 + }, + { + "type": "int", + "name": "GIT_TIMEOUT", + "comments": "

The operation timed out

\n", + "value": -37 + }, + { + "type": "int", + "name": "GIT_EUNCHANGED", + "comments": "

There were no changes

\n", + "value": -38 + }, + { + "type": "int", + "name": "GIT_ENOTSUPPORTED", + "comments": "

An option is not supported

\n", + "value": -39 + }, + { + "type": "int", + "name": "GIT_EREADONLY", + "comments": "

The subject is read-only

\n", + "value": -40 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_error_t", { "decl": [ - "GITERR_NONE", - "GITERR_NOMEMORY", - "GITERR_OS", - "GITERR_INVALID", - "GITERR_REFERENCE", - "GITERR_ZLIB", - "GITERR_REPOSITORY", - "GITERR_CONFIG", - "GITERR_REGEX", - "GITERR_ODB", - "GITERR_INDEX", - "GITERR_OBJECT", - "GITERR_NET", - "GITERR_TAG", - "GITERR_TREE", - "GITERR_INDEXER", - "GITERR_SSL", - "GITERR_SUBMODULE", - "GITERR_THREAD", - "GITERR_STASH", - "GITERR_CHECKOUT", - "GITERR_FETCHHEAD", - "GITERR_MERGE", - "GITERR_SSH", - "GITERR_FILTER", - "GITERR_REVERT", - "GITERR_CALLBACK", - "GITERR_CHERRYPICK", - "GITERR_DESCRIBE", - "GITERR_REBASE", - "GITERR_FILESYSTEM", - "GITERR_PATCH", - "GITERR_WORKTREE", - "GITERR_SHA1" + "GIT_ERROR_NONE", + "GIT_ERROR_NOMEMORY", + "GIT_ERROR_OS", + "GIT_ERROR_INVALID", + "GIT_ERROR_REFERENCE", + "GIT_ERROR_ZLIB", + "GIT_ERROR_REPOSITORY", + "GIT_ERROR_CONFIG", + "GIT_ERROR_REGEX", + "GIT_ERROR_ODB", + "GIT_ERROR_INDEX", + "GIT_ERROR_OBJECT", + "GIT_ERROR_NET", + "GIT_ERROR_TAG", + "GIT_ERROR_TREE", + "GIT_ERROR_INDEXER", + "GIT_ERROR_SSL", + "GIT_ERROR_SUBMODULE", + "GIT_ERROR_THREAD", + "GIT_ERROR_STASH", + "GIT_ERROR_CHECKOUT", + "GIT_ERROR_FETCHHEAD", + "GIT_ERROR_MERGE", + "GIT_ERROR_SSH", + "GIT_ERROR_FILTER", + "GIT_ERROR_REVERT", + "GIT_ERROR_CALLBACK", + "GIT_ERROR_CHERRYPICK", + "GIT_ERROR_DESCRIBE", + "GIT_ERROR_REBASE", + "GIT_ERROR_FILESYSTEM", + "GIT_ERROR_PATCH", + "GIT_ERROR_WORKTREE", + "GIT_ERROR_SHA", + "GIT_ERROR_HTTP", + "GIT_ERROR_INTERNAL", + "GIT_ERROR_GRAFTS" ], "type": "enum", - "file": "errors.h", - "line": 72, - "lineto": 107, - "block": 
"GITERR_NONE\nGITERR_NOMEMORY\nGITERR_OS\nGITERR_INVALID\nGITERR_REFERENCE\nGITERR_ZLIB\nGITERR_REPOSITORY\nGITERR_CONFIG\nGITERR_REGEX\nGITERR_ODB\nGITERR_INDEX\nGITERR_OBJECT\nGITERR_NET\nGITERR_TAG\nGITERR_TREE\nGITERR_INDEXER\nGITERR_SSL\nGITERR_SUBMODULE\nGITERR_THREAD\nGITERR_STASH\nGITERR_CHECKOUT\nGITERR_FETCHHEAD\nGITERR_MERGE\nGITERR_SSH\nGITERR_FILTER\nGITERR_REVERT\nGITERR_CALLBACK\nGITERR_CHERRYPICK\nGITERR_DESCRIBE\nGITERR_REBASE\nGITERR_FILESYSTEM\nGITERR_PATCH\nGITERR_WORKTREE\nGITERR_SHA1", + "file": "git2/errors.h", + "line": 79, + "lineto": 117, + "block": "GIT_ERROR_NONE\nGIT_ERROR_NOMEMORY\nGIT_ERROR_OS\nGIT_ERROR_INVALID\nGIT_ERROR_REFERENCE\nGIT_ERROR_ZLIB\nGIT_ERROR_REPOSITORY\nGIT_ERROR_CONFIG\nGIT_ERROR_REGEX\nGIT_ERROR_ODB\nGIT_ERROR_INDEX\nGIT_ERROR_OBJECT\nGIT_ERROR_NET\nGIT_ERROR_TAG\nGIT_ERROR_TREE\nGIT_ERROR_INDEXER\nGIT_ERROR_SSL\nGIT_ERROR_SUBMODULE\nGIT_ERROR_THREAD\nGIT_ERROR_STASH\nGIT_ERROR_CHECKOUT\nGIT_ERROR_FETCHHEAD\nGIT_ERROR_MERGE\nGIT_ERROR_SSH\nGIT_ERROR_FILTER\nGIT_ERROR_REVERT\nGIT_ERROR_CALLBACK\nGIT_ERROR_CHERRYPICK\nGIT_ERROR_DESCRIBE\nGIT_ERROR_REBASE\nGIT_ERROR_FILESYSTEM\nGIT_ERROR_PATCH\nGIT_ERROR_WORKTREE\nGIT_ERROR_SHA\nGIT_ERROR_HTTP\nGIT_ERROR_INTERNAL\nGIT_ERROR_GRAFTS", "tdef": "typedef", - "description": " Error classes ", + "description": " Error classes are the category of error. 
They reflect the area of the\n code where an error occurred.", "comments": "", "fields": [ { "type": "int", - "name": "GITERR_NONE", + "name": "GIT_ERROR_NONE", "comments": "", "value": 0 }, { "type": "int", - "name": "GITERR_NOMEMORY", + "name": "GIT_ERROR_NOMEMORY", "comments": "", "value": 1 }, + { "type": "int", "name": "GIT_ERROR_OS", "comments": "", "value": 2 }, { "type": "int", - "name": "GITERR_OS", - "comments": "", - "value": 2 - }, - { - "type": "int", - "name": "GITERR_INVALID", + "name": "GIT_ERROR_INVALID", "comments": "", "value": 3 }, { "type": "int", - "name": "GITERR_REFERENCE", + "name": "GIT_ERROR_REFERENCE", "comments": "", "value": 4 }, { "type": "int", - "name": "GITERR_ZLIB", + "name": "GIT_ERROR_ZLIB", "comments": "", "value": 5 }, { "type": "int", - "name": "GITERR_REPOSITORY", + "name": "GIT_ERROR_REPOSITORY", "comments": "", "value": 6 }, { "type": "int", - "name": "GITERR_CONFIG", + "name": "GIT_ERROR_CONFIG", "comments": "", "value": 7 }, { "type": "int", - "name": "GITERR_REGEX", + "name": "GIT_ERROR_REGEX", "comments": "", "value": 8 }, { "type": "int", - "name": "GITERR_ODB", + "name": "GIT_ERROR_ODB", "comments": "", "value": 9 }, { "type": "int", - "name": "GITERR_INDEX", + "name": "GIT_ERROR_INDEX", "comments": "", "value": 10 }, { "type": "int", - "name": "GITERR_OBJECT", + "name": "GIT_ERROR_OBJECT", "comments": "", "value": 11 }, { "type": "int", - "name": "GITERR_NET", + "name": "GIT_ERROR_NET", "comments": "", "value": 12 }, { "type": "int", - "name": "GITERR_TAG", + "name": "GIT_ERROR_TAG", "comments": "", "value": 13 }, { "type": "int", - "name": "GITERR_TREE", + "name": "GIT_ERROR_TREE", "comments": "", "value": 14 }, { "type": "int", - "name": "GITERR_INDEXER", + "name": "GIT_ERROR_INDEXER", "comments": "", "value": 15 }, { "type": "int", - "name": "GITERR_SSL", + "name": "GIT_ERROR_SSL", "comments": "", "value": 16 }, { "type": "int", - "name": "GITERR_SUBMODULE", + "name": "GIT_ERROR_SUBMODULE", "comments": "", 
"value": 17 }, { "type": "int", - "name": "GITERR_THREAD", + "name": "GIT_ERROR_THREAD", "comments": "", "value": 18 }, { "type": "int", - "name": "GITERR_STASH", + "name": "GIT_ERROR_STASH", "comments": "", "value": 19 }, { "type": "int", - "name": "GITERR_CHECKOUT", + "name": "GIT_ERROR_CHECKOUT", "comments": "", "value": 20 }, { "type": "int", - "name": "GITERR_FETCHHEAD", + "name": "GIT_ERROR_FETCHHEAD", "comments": "", "value": 21 }, { "type": "int", - "name": "GITERR_MERGE", + "name": "GIT_ERROR_MERGE", "comments": "", "value": 22 }, { "type": "int", - "name": "GITERR_SSH", + "name": "GIT_ERROR_SSH", "comments": "", "value": 23 }, { "type": "int", - "name": "GITERR_FILTER", + "name": "GIT_ERROR_FILTER", "comments": "", "value": 24 }, { "type": "int", - "name": "GITERR_REVERT", + "name": "GIT_ERROR_REVERT", "comments": "", "value": 25 }, { "type": "int", - "name": "GITERR_CALLBACK", + "name": "GIT_ERROR_CALLBACK", "comments": "", "value": 26 }, { "type": "int", - "name": "GITERR_CHERRYPICK", + "name": "GIT_ERROR_CHERRYPICK", "comments": "", "value": 27 }, { "type": "int", - "name": "GITERR_DESCRIBE", + "name": "GIT_ERROR_DESCRIBE", "comments": "", "value": 28 }, { "type": "int", - "name": "GITERR_REBASE", + "name": "GIT_ERROR_REBASE", + "comments": "", + "value": 29 + }, + { + "type": "int", + "name": "GIT_ERROR_FILESYSTEM", + "comments": "", + "value": 30 + }, + { + "type": "int", + "name": "GIT_ERROR_PATCH", + "comments": "", + "value": 31 + }, + { + "type": "int", + "name": "GIT_ERROR_WORKTREE", "comments": "", - "value": 29 + "value": 32 }, { "type": "int", - "name": "GITERR_FILESYSTEM", + "name": "GIT_ERROR_SHA", "comments": "", - "value": 30 + "value": 33 }, { "type": "int", - "name": "GITERR_PATCH", + "name": "GIT_ERROR_HTTP", "comments": "", - "value": 31 + "value": 34 }, { "type": "int", - "name": "GITERR_WORKTREE", + "name": "GIT_ERROR_INTERNAL", "comments": "", - "value": 32 + "value": 35 }, { "type": "int", - "name": "GITERR_SHA1", + "name": 
"GIT_ERROR_GRAFTS", "comments": "", - "value": 33 + "value": 36 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -30956,46 +29678,128 @@ "GIT_FEATURE_THREADS", "GIT_FEATURE_HTTPS", "GIT_FEATURE_SSH", - "GIT_FEATURE_NSEC" + "GIT_FEATURE_NSEC", + "GIT_FEATURE_HTTP_PARSER", + "GIT_FEATURE_REGEX", + "GIT_FEATURE_I18N", + "GIT_FEATURE_AUTH_NTLM", + "GIT_FEATURE_AUTH_NEGOTIATE", + "GIT_FEATURE_COMPRESSION", + "GIT_FEATURE_SHA1", + "GIT_FEATURE_SHA256" ], "type": "enum", - "file": "common.h", - "line": 111, - "lineto": 134, - "block": "GIT_FEATURE_THREADS\nGIT_FEATURE_HTTPS\nGIT_FEATURE_SSH\nGIT_FEATURE_NSEC", + "file": "git2/common.h", + "line": 138, + "lineto": 177, + "block": "GIT_FEATURE_THREADS\nGIT_FEATURE_HTTPS\nGIT_FEATURE_SSH\nGIT_FEATURE_NSEC\nGIT_FEATURE_HTTP_PARSER\nGIT_FEATURE_REGEX\nGIT_FEATURE_I18N\nGIT_FEATURE_AUTH_NTLM\nGIT_FEATURE_AUTH_NEGOTIATE\nGIT_FEATURE_COMPRESSION\nGIT_FEATURE_SHA1\nGIT_FEATURE_SHA256", "tdef": "typedef", - "description": " Combinations of these values describe the features with which libgit2\n was compiled", + "description": " Configurable features of libgit2; either optional settings (like\n threading), or features that can be enabled by one of a number of\n different backend \"providers\" (like HTTPS, which can be provided by\n OpenSSL, mbedTLS, or system libraries).", "comments": "", "fields": [ { "type": "int", "name": "GIT_FEATURE_THREADS", - "comments": "

If set, libgit2 was built thread-aware and can be safely used from multiple\n threads.

\n", + "comments": "

libgit2 is thread-aware and can be used from multiple threads\n (as described in the documentation).

\n", "value": 1 }, { "type": "int", "name": "GIT_FEATURE_HTTPS", - "comments": "

If set, libgit2 was built with and linked against a TLS implementation.\n Custom TLS streams may still be added by the user to support HTTPS\n regardless of this.

\n", + "comments": "

HTTPS remotes

\n", "value": 2 }, { "type": "int", "name": "GIT_FEATURE_SSH", - "comments": "

If set, libgit2 was built with and linked against libssh2. A custom\n transport may still be added by the user to support libssh2 regardless of\n this.

\n", + "comments": "

SSH remotes

\n", "value": 4 }, { "type": "int", "name": "GIT_FEATURE_NSEC", - "comments": "

If set, libgit2 was built with support for sub-second resolution in file\n modification times.

\n", + "comments": "

Sub-second resolution in index timestamps

\n", "value": 8 + }, + { + "type": "int", + "name": "GIT_FEATURE_HTTP_PARSER", + "comments": "

HTTP parsing; always available

\n", + "value": 16 + }, + { + "type": "int", + "name": "GIT_FEATURE_REGEX", + "comments": "

Regular expression support; always available

\n", + "value": 32 + }, + { + "type": "int", + "name": "GIT_FEATURE_I18N", + "comments": "

Internationalization support for filename translation

\n", + "value": 64 + }, + { + "type": "int", + "name": "GIT_FEATURE_AUTH_NTLM", + "comments": "

NTLM support over HTTPS

\n", + "value": 128 + }, + { + "type": "int", + "name": "GIT_FEATURE_AUTH_NEGOTIATE", + "comments": "

Kerberos (SPNEGO) authentication support over HTTPS

\n", + "value": 256 + }, + { + "type": "int", + "name": "GIT_FEATURE_COMPRESSION", + "comments": "

zlib support; always available

\n", + "value": 512 + }, + { + "type": "int", + "name": "GIT_FEATURE_SHA1", + "comments": "

SHA1 object support; always available

\n", + "value": 1024 + }, + { + "type": "int", + "name": "GIT_FEATURE_SHA256", + "comments": "

SHA256 object support

\n", + "value": 2048 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": ["git_libgit2_feature_backend"] } + } + ], + [ + "git_fetch_depth_t", + { + "decl": ["GIT_FETCH_DEPTH_FULL", "GIT_FETCH_DEPTH_UNSHALLOW"], + "type": "enum", + "file": "git2/remote.h", + "line": 760, + "lineto": 766, + "block": "GIT_FETCH_DEPTH_FULL\nGIT_FETCH_DEPTH_UNSHALLOW", + "tdef": "typedef", + "description": " Constants for fetch depth (shallowness of fetch). ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_FETCH_DEPTH_FULL", + "comments": "

The fetch is "full" (not shallow). This is the default.

\n", + "value": 0 + }, + { + "type": "int", + "name": "GIT_FETCH_DEPTH_UNSHALLOW", + "comments": "

The fetch should "unshallow" and fetch missing data.

\n", + "value": 2147483647 + } + ], + "used": { "returns": [], "needs": [] } } ], [ @@ -31005,26 +29809,24 @@ "int version", "git_remote_callbacks callbacks", "git_fetch_prune_t prune", - "int update_fetchhead", + "unsigned int update_fetchhead", "git_remote_autotag_option_t download_tags", "git_proxy_options proxy_opts", + "int depth", + "git_remote_redirect_t follow_redirects", "git_strarray custom_headers" ], "type": "struct", "value": "git_fetch_options", - "file": "remote.h", - "line": 555, - "lineto": 592, - "block": "int version\ngit_remote_callbacks callbacks\ngit_fetch_prune_t prune\nint update_fetchhead\ngit_remote_autotag_option_t download_tags\ngit_proxy_options proxy_opts\ngit_strarray custom_headers", + "file": "git2/remote.h", + "line": 776, + "lineto": 828, + "block": "int version\ngit_remote_callbacks callbacks\ngit_fetch_prune_t prune\nunsigned int update_fetchhead\ngit_remote_autotag_option_t download_tags\ngit_proxy_options proxy_opts\nint depth\ngit_remote_redirect_t follow_redirects\ngit_strarray custom_headers", "tdef": "typedef", "description": " Fetch options structure.", "comments": "

Zero out for defaults. Initialize with GIT_FETCH_OPTIONS_INIT macro to correctly set the version field. E.g.

\n\n
    git_fetch_options opts = GIT_FETCH_OPTIONS_INIT;\n
\n", "fields": [ - { - "type": "int", - "name": "version", - "comments": "" - }, + { "type": "int", "name": "version", "comments": "" }, { "type": "git_remote_callbacks", "name": "callbacks", @@ -31036,9 +29838,9 @@ "comments": " Whether to perform a prune after the fetch" }, { - "type": "int", + "type": "unsigned int", "name": "update_fetchhead", - "comments": " Whether to write the results to FETCH_HEAD. Defaults to\n on. Leave this default in order to behave like git." + "comments": " How to handle reference updates; see `git_remote_update_flags`." }, { "type": "git_remote_autotag_option_t", @@ -31050,6 +29852,16 @@ "name": "proxy_opts", "comments": " Proxy options to use, by default no proxy is used." }, + { + "type": "int", + "name": "depth", + "comments": " Depth of the fetch to perform, or `GIT_FETCH_DEPTH_FULL`\n (or `0`) for full history, or `GIT_FETCH_DEPTH_UNSHALLOW`\n to \"unshallow\" a shallow repository.\n\n The default is full (`GIT_FETCH_DEPTH_FULL` or `0`)." + }, + { + "type": "git_remote_redirect_t", + "name": "follow_redirects", + "comments": " Whether to allow off-site redirects. If this is not\n specified, the `http.followRedirects` configuration setting\n will be consulted." + }, { "type": "git_strarray", "name": "custom_headers", @@ -31059,13 +29871,52 @@ "used": { "returns": [], "needs": [ - "git_fetch_init_options", + "git_fetch_options_init", "git_remote_download", "git_remote_fetch" ] } } ], + [ + "git_fetch_prune_t", + { + "decl": [ + "GIT_FETCH_PRUNE_UNSPECIFIED", + "GIT_FETCH_PRUNE", + "GIT_FETCH_NO_PRUNE" + ], + "type": "enum", + "file": "git2/remote.h", + "line": 719, + "lineto": 732, + "block": "GIT_FETCH_PRUNE_UNSPECIFIED\nGIT_FETCH_PRUNE\nGIT_FETCH_NO_PRUNE", + "tdef": "typedef", + "description": " Acceptable prune settings when fetching ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_FETCH_PRUNE_UNSPECIFIED", + "comments": "

Use the setting from the configuration

\n", + "value": 0 + }, + { + "type": "int", + "name": "GIT_FETCH_PRUNE", + "comments": "

Force pruning on

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_FETCH_NO_PRUNE", + "comments": "

Force pruning off

\n", + "value": 2 + } + ], + "used": { "returns": [], "needs": [] } + } + ], [ "git_filemode_t", { @@ -31078,9 +29929,9 @@ "GIT_FILEMODE_COMMIT" ], "type": "enum", - "file": "types.h", - "line": 209, - "lineto": 216, + "file": "git2/types.h", + "line": 238, + "lineto": 245, "block": "GIT_FILEMODE_UNREADABLE\nGIT_FILEMODE_TREE\nGIT_FILEMODE_BLOB\nGIT_FILEMODE_BLOB_EXECUTABLE\nGIT_FILEMODE_LINK\nGIT_FILEMODE_COMMIT", "tdef": "typedef", "description": " Valid modes for index and tree entries. ", @@ -31124,111 +29975,38 @@ } ], "used": { - "returns": [ - "git_tree_entry_filemode", - "git_tree_entry_filemode_raw" - ], - "needs": [ - "git_treebuilder_insert" - ] + "returns": ["git_tree_entry_filemode", "git_tree_entry_filemode_raw"], + "needs": ["git_treebuilder_insert"] } } ], [ "git_filter", { - "decl": [ - "unsigned int version", - "const char * attributes", - "git_filter_init_fn initialize", - "git_filter_shutdown_fn shutdown", - "git_filter_check_fn check", - "git_filter_apply_fn apply", - "git_filter_stream_fn stream", - "git_filter_cleanup_fn cleanup" - ], + "decl": "git_filter", "type": "struct", "value": "git_filter", - "file": "sys/filter.h", - "line": 226, - "lineto": 271, - "tdef": null, - "description": " Filter structure used to register custom filters.", - "comments": "

To associate extra data with a filter, allocate extra data and put the git_filter struct at the start of your data buffer, then cast the self pointer to your larger structure when your callback is invoked.

\n", - "block": "unsigned int version\nconst char * attributes\ngit_filter_init_fn initialize\ngit_filter_shutdown_fn shutdown\ngit_filter_check_fn check\ngit_filter_apply_fn apply\ngit_filter_stream_fn stream\ngit_filter_cleanup_fn cleanup", - "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": " The `version` field should be set to `GIT_FILTER_VERSION`. " - }, - { - "type": "const char *", - "name": "attributes", - "comments": " A whitespace-separated list of attribute names to check for this\n filter (e.g. \"eol crlf text\"). If the attribute name is bare, it\n will be simply loaded and passed to the `check` callback. If it\n has a value (i.e. \"name=value\"), the attribute must match that\n value for the filter to be applied. The value may be a wildcard\n (eg, \"name=*\"), in which case the filter will be invoked for any\n value for the given attribute name. See the attribute parameter\n of the `check` callback for the attribute value that was specified." - }, - { - "type": "git_filter_init_fn", - "name": "initialize", - "comments": " Called when the filter is first used for any file. " - }, - { - "type": "git_filter_shutdown_fn", - "name": "shutdown", - "comments": " Called when the filter is removed or unregistered from the system. " - }, - { - "type": "git_filter_check_fn", - "name": "check", - "comments": " Called to determine whether the filter should be invoked for a\n given file. If this function returns `GIT_PASSTHROUGH` then the\n `apply` function will not be invoked and the contents will be passed\n through unmodified." - }, - { - "type": "git_filter_apply_fn", - "name": "apply", - "comments": " Called to actually apply the filter to file contents. If this\n function returns `GIT_PASSTHROUGH` then the contents will be passed\n through unmodified." - }, - { - "type": "git_filter_stream_fn", - "name": "stream", - "comments": " Called to apply the filter in a streaming manner. 
If this is not\n specified then the system will call `apply` with the whole buffer." - }, - { - "type": "git_filter_cleanup_fn", - "name": "cleanup", - "comments": " Called when the system is done filtering for a file. " - } - ], + "file": "git2/filter.h", + "line": 109, + "lineto": 109, + "tdef": "typedef", + "description": " A filter that can transform file data", + "comments": "

This represents a filter that can be used to transform or even replace file data. Libgit2 includes one built in filter and it is possible to write your own (see git2/sys/filter.h for information on that).

\n\n

The two builtin filters are:

\n\n\n", "used": { - "returns": [ - "git_filter_lookup", - "git_filter_source_mode" - ], + "returns": [], "needs": [ - "git_filter_apply_fn", - "git_filter_check_fn", - "git_filter_cleanup_fn", - "git_filter_init", - "git_filter_init_fn", "git_filter_list_apply_to_blob", + "git_filter_list_apply_to_buffer", "git_filter_list_apply_to_data", "git_filter_list_apply_to_file", "git_filter_list_contains", "git_filter_list_free", - "git_filter_list_length", "git_filter_list_load", - "git_filter_list_new", - "git_filter_list_push", + "git_filter_list_load_ext", "git_filter_list_stream_blob", + "git_filter_list_stream_buffer", "git_filter_list_stream_data", - "git_filter_list_stream_file", - "git_filter_register", - "git_filter_shutdown_fn", - "git_filter_source_filemode", - "git_filter_source_flags", - "git_filter_source_id", - "git_filter_source_mode", - "git_filter_source_path", - "git_filter_source_repo" + "git_filter_list_stream_file" ] } } @@ -31238,13 +30016,16 @@ { "decl": [ "GIT_FILTER_DEFAULT", - "GIT_FILTER_ALLOW_UNSAFE" + "GIT_FILTER_ALLOW_UNSAFE", + "GIT_FILTER_NO_SYSTEM_ATTRIBUTES", + "GIT_FILTER_ATTRIBUTES_FROM_HEAD", + "GIT_FILTER_ATTRIBUTES_FROM_COMMIT" ], "type": "enum", - "file": "filter.h", - "line": 41, - "lineto": 44, - "block": "GIT_FILTER_DEFAULT\nGIT_FILTER_ALLOW_UNSAFE", + "file": "git2/filter.h", + "line": 47, + "lineto": 64, + "block": "GIT_FILTER_DEFAULT\nGIT_FILTER_ALLOW_UNSAFE\nGIT_FILTER_NO_SYSTEM_ATTRIBUTES\nGIT_FILTER_ATTRIBUTES_FROM_HEAD\nGIT_FILTER_ATTRIBUTES_FROM_COMMIT", "tdef": "typedef", "description": " Filter option flags.", "comments": "", @@ -31258,14 +30039,29 @@ { "type": "int", "name": "GIT_FILTER_ALLOW_UNSAFE", - "comments": "", + "comments": "

Don't error for safecrlf violations, allow them to continue.

\n", "value": 1 + }, + { + "type": "int", + "name": "GIT_FILTER_NO_SYSTEM_ATTRIBUTES", + "comments": "

Don't load /etc/gitattributes (or the system equivalent)

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_FILTER_ATTRIBUTES_FROM_HEAD", + "comments": "

Load attributes from .gitattributes in the root of HEAD

\n", + "value": 4 + }, + { + "type": "int", + "name": "GIT_FILTER_ATTRIBUTES_FROM_COMMIT", + "comments": "

Load attributes from .gitattributes in a given commit.\n This can only be specified in a git_filter_options.

\n", + "value": 8 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -31274,9 +30070,9 @@ "decl": "git_filter_list", "type": "struct", "value": "git_filter_list", - "file": "filter.h", - "line": 73, - "lineto": 73, + "file": "git2/filter.h", + "line": 121, + "lineto": 121, "tdef": "typedef", "description": " List of filters to be applied", "comments": "

This represents a list of filters to be applied to a file / blob. You can build the list with one call, apply it with another, and dispose it with a third. In typical usage, there are not many occasions where a git_filter_list is needed directly since the library will generally handle conversions for you, but it can be convenient to be able to build and apply the list sometimes.

\n", @@ -31284,15 +30080,15 @@ "returns": [], "needs": [ "git_filter_list_apply_to_blob", + "git_filter_list_apply_to_buffer", "git_filter_list_apply_to_data", "git_filter_list_apply_to_file", "git_filter_list_contains", "git_filter_list_free", - "git_filter_list_length", "git_filter_list_load", - "git_filter_list_new", - "git_filter_list_push", + "git_filter_list_load_ext", "git_filter_list_stream_blob", + "git_filter_list_stream_buffer", "git_filter_list_stream_data", "git_filter_list_stream_file" ] @@ -31309,9 +30105,9 @@ "GIT_FILTER_CLEAN" ], "type": "enum", - "file": "filter.h", - "line": 31, - "lineto": 36, + "file": "git2/filter.h", + "line": 37, + "lineto": 42, "block": "GIT_FILTER_TO_WORKTREE\nGIT_FILTER_SMUDGE\nGIT_FILTER_TO_ODB\nGIT_FILTER_CLEAN", "tdef": "typedef", "description": " Filters are applied in one of two directions: smudging - which is\n exporting a file from the Git object database to the working directory,\n and cleaning - which is importing a file from the working directory to\n the Git object database. 
These values control which direction of\n change is being applied.", @@ -31343,64 +30139,59 @@ } ], "used": { - "returns": [ - "git_filter_source_mode" - ], - "needs": [ - "git_filter_list_load", - "git_filter_list_new" - ] + "returns": [], + "needs": ["git_filter_list_load", "git_filter_list_load_ext"] } } ], [ - "git_filter_source", + "git_filter_options", { - "decl": "git_filter_source", + "decl": [ + "unsigned int version", + "uint32_t flags", + "git_oid * commit_id", + "git_oid attr_commit_id" + ], "type": "struct", - "value": "git_filter_source", - "file": "sys/filter.h", - "line": 95, - "lineto": 95, + "value": "git_filter_options", + "file": "git2/filter.h", + "line": 69, + "lineto": 86, + "block": "unsigned int version\nuint32_t flags\ngit_oid * commit_id\ngit_oid attr_commit_id", "tdef": "typedef", - "description": " A filter source represents a file/blob to be processed", + "description": " Filtering options", "comments": "", - "used": { - "returns": [], - "needs": [ - "git_filter_apply_fn", - "git_filter_check_fn", - "git_filter_source_filemode", - "git_filter_source_flags", - "git_filter_source_id", - "git_filter_source_mode", - "git_filter_source_path", - "git_filter_source_repo" - ] - } + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "uint32_t", + "name": "flags", + "comments": " See `git_filter_flag_t` above " + }, + { "type": "git_oid *", "name": "commit_id", "comments": "" }, + { + "type": "git_oid", + "name": "attr_commit_id", + "comments": " The commit to load attributes from, when\n `GIT_FILTER_ATTRIBUTES_FROM_COMMIT` is specified." 
+ } + ], + "used": { "returns": [], "needs": ["git_filter_list_load_ext"] } } ], [ - "git_hashsig", + "git_filter_source", { - "decl": "git_hashsig", + "decl": "git_filter_source", "type": "struct", - "value": "git_hashsig", - "file": "sys/hashsig.h", - "line": 17, - "lineto": 17, + "value": "git_filter_source", + "file": "git2/sys/filter.h", + "line": 109, + "lineto": 109, "tdef": "typedef", - "description": " Similarity signature of arbitrary text content based on line hashes", + "description": " A filter source represents a file/blob to be processed", "comments": "", - "used": { - "returns": [], - "needs": [ - "git_hashsig_compare", - "git_hashsig_create", - "git_hashsig_create_fromfile", - "git_hashsig_free" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -31413,165 +30204,40 @@ "GIT_HASHSIG_ALLOW_SMALL_FILES" ], "type": "enum", - "file": "sys/hashsig.h", - "line": 25, - "lineto": 45, - "block": "GIT_HASHSIG_NORMAL\nGIT_HASHSIG_IGNORE_WHITESPACE\nGIT_HASHSIG_SMART_WHITESPACE\nGIT_HASHSIG_ALLOW_SMALL_FILES", - "tdef": "typedef", - "description": " Options for hashsig computation", - "comments": "

The options GIT_HASHSIG_NORMAL, GIT_HASHSIG_IGNORE_WHITESPACE, GIT_HASHSIG_SMART_WHITESPACE are exclusive and should not be combined.

\n", - "fields": [ - { - "type": "int", - "name": "GIT_HASHSIG_NORMAL", - "comments": "

Use all data

\n", - "value": 0 - }, - { - "type": "int", - "name": "GIT_HASHSIG_IGNORE_WHITESPACE", - "comments": "

Ignore whitespace

\n", - "value": 1 - }, - { - "type": "int", - "name": "GIT_HASHSIG_SMART_WHITESPACE", - "comments": "

Ignore

\n\n

and all space after

\n", - "value": 2 - }, - { - "type": "int", - "name": "GIT_HASHSIG_ALLOW_SMALL_FILES", - "comments": "

Allow hashing of small files

\n", - "value": 4 - } - ], - "used": { - "returns": [], - "needs": [ - "git_hashsig_create", - "git_hashsig_create_fromfile" - ] - } - } - ], - [ - "git_idxentry_extended_flag_t", - { - "decl": [ - "GIT_IDXENTRY_INTENT_TO_ADD", - "GIT_IDXENTRY_SKIP_WORKTREE", - "GIT_IDXENTRY_EXTENDED2", - "GIT_IDXENTRY_EXTENDED_FLAGS", - "GIT_IDXENTRY_UPDATE", - "GIT_IDXENTRY_REMOVE", - "GIT_IDXENTRY_UPTODATE", - "GIT_IDXENTRY_ADDED", - "GIT_IDXENTRY_HASHED", - "GIT_IDXENTRY_UNHASHED", - "GIT_IDXENTRY_WT_REMOVE", - "GIT_IDXENTRY_CONFLICTED", - "GIT_IDXENTRY_UNPACKED", - "GIT_IDXENTRY_NEW_SKIP_WORKTREE" - ], - "type": "enum", - "file": "index.h", - "line": 115, - "lineto": 135, - "block": "GIT_IDXENTRY_INTENT_TO_ADD\nGIT_IDXENTRY_SKIP_WORKTREE\nGIT_IDXENTRY_EXTENDED2\nGIT_IDXENTRY_EXTENDED_FLAGS\nGIT_IDXENTRY_UPDATE\nGIT_IDXENTRY_REMOVE\nGIT_IDXENTRY_UPTODATE\nGIT_IDXENTRY_ADDED\nGIT_IDXENTRY_HASHED\nGIT_IDXENTRY_UNHASHED\nGIT_IDXENTRY_WT_REMOVE\nGIT_IDXENTRY_CONFLICTED\nGIT_IDXENTRY_UNPACKED\nGIT_IDXENTRY_NEW_SKIP_WORKTREE", - "tdef": "typedef", - "description": " Bitmasks for on-disk fields of `git_index_entry`'s `flags_extended`", - "comments": "

In memory, the flags_extended fields are divided into two parts: the fields that are read from and written to disk, and other fields that in-memory only and used by libgit2. Only the flags in GIT_IDXENTRY_EXTENDED_FLAGS will get saved on-disk.

\n\n

Thee first three bitmasks match the three fields in the git_index_entry flags_extended value that belong on disk. You can use them to interpret the data in the flags_extended.

\n\n

The rest of the bitmasks match the other fields in the git_index_entry flags_extended value that are only used in-memory by libgit2. You can use them to interpret the data in the flags_extended.

\n", - "fields": [ - { - "type": "int", - "name": "GIT_IDXENTRY_INTENT_TO_ADD", - "comments": "", - "value": 8192 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_SKIP_WORKTREE", - "comments": "", - "value": 16384 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_EXTENDED2", - "comments": "

Reserved for future extension

\n", - "value": 32768 - }, + "file": "git2/sys/hashsig.h", + "line": 35, + "lineto": 55, + "block": "GIT_HASHSIG_NORMAL\nGIT_HASHSIG_IGNORE_WHITESPACE\nGIT_HASHSIG_SMART_WHITESPACE\nGIT_HASHSIG_ALLOW_SMALL_FILES", + "tdef": "typedef", + "description": " Options for hashsig computation", + "comments": "

The options GIT_HASHSIG_NORMAL, GIT_HASHSIG_IGNORE_WHITESPACE, GIT_HASHSIG_SMART_WHITESPACE are exclusive and should not be combined.

\n", + "fields": [ { "type": "int", - "name": "GIT_IDXENTRY_EXTENDED_FLAGS", - "comments": "

Reserved for future extension

\n", - "value": 24576 + "name": "GIT_HASHSIG_NORMAL", + "comments": "

Use all data

\n", + "value": 0 }, { "type": "int", - "name": "GIT_IDXENTRY_UPDATE", - "comments": "

Reserved for future extension

\n", + "name": "GIT_HASHSIG_IGNORE_WHITESPACE", + "comments": "

Ignore whitespace

\n", "value": 1 }, { "type": "int", - "name": "GIT_IDXENTRY_REMOVE", - "comments": "

Reserved for future extension

\n", + "name": "GIT_HASHSIG_SMART_WHITESPACE", + "comments": "

Ignore

\n\n

and all space after

\n", "value": 2 }, { "type": "int", - "name": "GIT_IDXENTRY_UPTODATE", - "comments": "

Reserved for future extension

\n", + "name": "GIT_HASHSIG_ALLOW_SMALL_FILES", + "comments": "

Allow hashing of small files

\n", "value": 4 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_ADDED", - "comments": "

Reserved for future extension

\n", - "value": 8 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_HASHED", - "comments": "

Reserved for future extension

\n", - "value": 16 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_UNHASHED", - "comments": "

Reserved for future extension

\n", - "value": 32 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_WT_REMOVE", - "comments": "

remove in work directory

\n", - "value": 64 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_CONFLICTED", - "comments": "", - "value": 128 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_UNPACKED", - "comments": "", - "value": 256 - }, - { - "type": "int", - "name": "GIT_IDXENTRY_NEW_SKIP_WORKTREE", - "comments": "", - "value": 512 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -31580,18 +30246,20 @@ "decl": "git_index", "type": "struct", "value": "git_index", - "file": "types.h", - "line": 135, - "lineto": 135, + "file": "git2/types.h", + "line": 153, + "lineto": 153, "tdef": "typedef", "description": " Memory representation of an index file. ", "comments": "", "used": { "returns": [ "git_index_get_byindex", - "git_index_get_bypath" + "git_index_get_bypath", + "git_remote_stats" ], "needs": [ + "git_apply_to_tree", "git_checkout_index", "git_cherrypick_commit", "git_diff_index_to_index", @@ -31600,7 +30268,7 @@ "git_index_add", "git_index_add_all", "git_index_add_bypath", - "git_index_add_frombuffer", + "git_index_add_from_buffer", "git_index_caps", "git_index_checksum", "git_index_clear", @@ -31620,6 +30288,9 @@ "git_index_get_byindex", "git_index_get_bypath", "git_index_has_conflicts", + "git_index_iterator_free", + "git_index_iterator_new", + "git_index_iterator_next", "git_index_new", "git_index_open", "git_index_owner", @@ -31641,14 +30312,18 @@ "git_indexer_commit", "git_indexer_free", "git_indexer_hash", + "git_indexer_name", "git_indexer_new", + "git_indexer_options_init", + "git_indexer_progress_cb", "git_merge_commits", "git_merge_file_from_index", "git_merge_trees", + "git_odb_write_pack", + "git_packbuilder_write", "git_pathspec_match_index", "git_rebase_inmemory_index", "git_repository_index", - "git_repository_set_index", "git_revert_commit" ] } @@ -31664,9 +30339,9 @@ "GIT_INDEX_ADD_CHECK_PATHSPEC" ], "type": "enum", - "file": "index.h", - "line": 150, - "lineto": 155, + "file": "git2/index.h", + "line": 162, + 
"lineto": 167, "block": "GIT_INDEX_ADD_DEFAULT\nGIT_INDEX_ADD_FORCE\nGIT_INDEX_ADD_DISABLE_PATHSPEC_MATCH\nGIT_INDEX_ADD_CHECK_PATHSPEC", "tdef": "typedef", "description": " Flags for APIs that add files matching pathspec ", @@ -31697,10 +30372,53 @@ "value": 4 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_index_capability_t", + { + "decl": [ + "GIT_INDEX_CAPABILITY_IGNORE_CASE", + "GIT_INDEX_CAPABILITY_NO_FILEMODE", + "GIT_INDEX_CAPABILITY_NO_SYMLINKS", + "GIT_INDEX_CAPABILITY_FROM_OWNER" + ], + "type": "enum", + "file": "git2/index.h", + "line": 142, + "lineto": 147, + "block": "GIT_INDEX_CAPABILITY_IGNORE_CASE\nGIT_INDEX_CAPABILITY_NO_FILEMODE\nGIT_INDEX_CAPABILITY_NO_SYMLINKS\nGIT_INDEX_CAPABILITY_FROM_OWNER", + "tdef": "typedef", + "description": " Capabilities of system that affect index actions. ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_INDEX_CAPABILITY_IGNORE_CASE", + "comments": "", + "value": 1 + }, + { + "type": "int", + "name": "GIT_INDEX_CAPABILITY_NO_FILEMODE", + "comments": "", + "value": 2 + }, + { + "type": "int", + "name": "GIT_INDEX_CAPABILITY_NO_SYMLINKS", + "comments": "", + "value": 4 + }, + { + "type": "int", + "name": "GIT_INDEX_CAPABILITY_FROM_OWNER", + "comments": "", + "value": -1 + } + ], + "used": { "returns": [], "needs": [] } } ], [ @@ -31709,9 +30427,9 @@ "decl": "git_index_conflict_iterator", "type": "struct", "value": "git_index_conflict_iterator", - "file": "types.h", - "line": 138, - "lineto": 138, + "file": "git2/types.h", + "line": 159, + "lineto": 159, "tdef": "typedef", "description": " An iterator for conflicts in the index. 
", "comments": "", @@ -31744,208 +30462,351 @@ ], "type": "struct", "value": "git_index_entry", - "file": "index.h", - "line": 53, - "lineto": 70, + "file": "git2/index.h", + "line": 58, + "lineto": 75, "block": "git_index_time ctime\ngit_index_time mtime\nuint32_t dev\nuint32_t ino\nuint32_t mode\nuint32_t uid\nuint32_t gid\nuint32_t file_size\ngit_oid id\nuint16_t flags\nuint16_t flags_extended\nconst char * path", "tdef": "typedef", "description": " In-memory representation of a file entry in the index.", - "comments": "

This is a public structure that represents a file entry in the index. The meaning of the fields corresponds to core Git's documentation (in "Documentation/technical/index-format.txt").

\n\n

The flags field consists of a number of bit fields which can be accessed via the first set of GIT_IDXENTRY_... bitmasks below. These flags are all read from and persisted to disk.

\n\n

The flags_extended field also has a number of bit fields which can be accessed via the later GIT_IDXENTRY_... bitmasks below. Some of these flags are read from and written to disk, but some are set aside for in-memory only reference.

\n\n

Note that the time and size fields are truncated to 32 bits. This is enough to detect changes, which is enough for the index to function as a cache, but it should not be taken as an authoritative source for that data.

\n", + "comments": "

This is a public structure that represents a file entry in the index. The meaning of the fields corresponds to core Git's documentation (in "Documentation/technical/index-format.txt").

\n\n

The flags field consists of a number of bit fields which can be accessed via the first set of GIT_INDEX_ENTRY_... bitmasks below. These flags are all read from and persisted to disk.

\n\n

The flags_extended field also has a number of bit fields which can be accessed via the later GIT_INDEX_ENTRY_... bitmasks below. Some of these flags are read from and written to disk, but some are set aside for in-memory only reference.

\n\n

Note that the time and size fields are truncated to 32 bits. This is enough to detect changes, which is enough for the index to function as a cache, but it should not be taken as an authoritative source for that data.

\n", + "fields": [ + { "type": "git_index_time", "name": "ctime", "comments": "" }, + { "type": "git_index_time", "name": "mtime", "comments": "" }, + { "type": "uint32_t", "name": "dev", "comments": "" }, + { "type": "uint32_t", "name": "ino", "comments": "" }, + { "type": "uint32_t", "name": "mode", "comments": "" }, + { "type": "uint32_t", "name": "uid", "comments": "" }, + { "type": "uint32_t", "name": "gid", "comments": "" }, + { "type": "uint32_t", "name": "file_size", "comments": "" }, + { "type": "git_oid", "name": "id", "comments": "" }, + { "type": "uint16_t", "name": "flags", "comments": "" }, + { "type": "uint16_t", "name": "flags_extended", "comments": "" }, + { "type": "const char *", "name": "path", "comments": "" } + ], + "used": { + "returns": ["git_index_get_byindex", "git_index_get_bypath"], + "needs": [ + "git_index_add", + "git_index_add_from_buffer", + "git_index_conflict_add", + "git_index_conflict_get", + "git_index_conflict_next", + "git_index_entry_is_conflict", + "git_index_entry_stage", + "git_index_iterator_next", + "git_merge_file_from_index" + ] + } + } + ], + [ + "git_index_entry_extended_flag_t", + { + "decl": [ + "GIT_INDEX_ENTRY_INTENT_TO_ADD", + "GIT_INDEX_ENTRY_SKIP_WORKTREE", + "GIT_INDEX_ENTRY_EXTENDED_FLAGS", + "GIT_INDEX_ENTRY_UPTODATE" + ], + "type": "enum", + "file": "git2/index.h", + "line": 132, + "lineto": 139, + "block": "GIT_INDEX_ENTRY_INTENT_TO_ADD\nGIT_INDEX_ENTRY_SKIP_WORKTREE\nGIT_INDEX_ENTRY_EXTENDED_FLAGS\nGIT_INDEX_ENTRY_UPTODATE", + "tdef": "typedef", + "description": " Bitmasks for on-disk fields of `git_index_entry`'s `flags_extended`", + "comments": "

In memory, the flags_extended fields are divided into two parts: the fields that are read from and written to disk, and other fields that in-memory only and used by libgit2. Only the flags in GIT_INDEX_ENTRY_EXTENDED_FLAGS will get saved on-disk.

\n\n

Thee first three bitmasks match the three fields in the git_index_entry flags_extended value that belong on disk. You can use them to interpret the data in the flags_extended.

\n\n

The rest of the bitmasks match the other fields in the git_index_entry flags_extended value that are only used in-memory by libgit2. You can use them to interpret the data in the flags_extended.

\n", "fields": [ { - "type": "git_index_time", - "name": "ctime", - "comments": "" - }, - { - "type": "git_index_time", - "name": "mtime", - "comments": "" + "type": "int", + "name": "GIT_INDEX_ENTRY_INTENT_TO_ADD", + "comments": "", + "value": 8192 }, { - "type": "uint32_t", - "name": "dev", - "comments": "" + "type": "int", + "name": "GIT_INDEX_ENTRY_SKIP_WORKTREE", + "comments": "", + "value": 16384 }, { - "type": "uint32_t", - "name": "ino", - "comments": "" + "type": "int", + "name": "GIT_INDEX_ENTRY_EXTENDED_FLAGS", + "comments": "", + "value": 24576 }, { - "type": "uint32_t", - "name": "mode", - "comments": "" - }, + "type": "int", + "name": "GIT_INDEX_ENTRY_UPTODATE", + "comments": "", + "value": 4 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_index_entry_flag_t", + { + "decl": ["GIT_INDEX_ENTRY_EXTENDED", "GIT_INDEX_ENTRY_VALID"], + "type": "enum", + "file": "git2/index.h", + "line": 95, + "lineto": 98, + "block": "GIT_INDEX_ENTRY_EXTENDED\nGIT_INDEX_ENTRY_VALID", + "tdef": "typedef", + "description": " Flags for index entries", + "comments": "", + "fields": [ { - "type": "uint32_t", - "name": "uid", - "comments": "" + "type": "int", + "name": "GIT_INDEX_ENTRY_EXTENDED", + "comments": "", + "value": 16384 }, { - "type": "uint32_t", - "name": "gid", - "comments": "" - }, + "type": "int", + "name": "GIT_INDEX_ENTRY_VALID", + "comments": "", + "value": 32768 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_index_iterator", + { + "decl": "git_index_iterator", + "type": "struct", + "value": "git_index_iterator", + "file": "git2/types.h", + "line": 156, + "lineto": 156, + "tdef": "typedef", + "description": " An iterator for entries in the index. 
", + "comments": "", + "used": { + "returns": [], + "needs": [ + "git_index_iterator_free", + "git_index_iterator_new", + "git_index_iterator_next" + ] + } + } + ], + [ + "git_index_stage_t", + { + "decl": [ + "GIT_INDEX_STAGE_ANY", + "GIT_INDEX_STAGE_NORMAL", + "GIT_INDEX_STAGE_ANCESTOR", + "GIT_INDEX_STAGE_OURS", + "GIT_INDEX_STAGE_THEIRS" + ], + "type": "enum", + "file": "git2/index.h", + "line": 170, + "lineto": 190, + "block": "GIT_INDEX_STAGE_ANY\nGIT_INDEX_STAGE_NORMAL\nGIT_INDEX_STAGE_ANCESTOR\nGIT_INDEX_STAGE_OURS\nGIT_INDEX_STAGE_THEIRS", + "tdef": "typedef", + "description": " Git index stage states ", + "comments": "", + "fields": [ { - "type": "uint32_t", - "name": "file_size", - "comments": "" + "type": "int", + "name": "GIT_INDEX_STAGE_ANY", + "comments": "

Match any index stage.

\n\n

Some index APIs take a stage to match; pass this value to match\n any entry matching the path regardless of stage.

\n", + "value": -1 }, { - "type": "git_oid", - "name": "id", - "comments": "" + "type": "int", + "name": "GIT_INDEX_STAGE_NORMAL", + "comments": "

A normal staged file in the index.

\n", + "value": 0 }, { - "type": "uint16_t", - "name": "flags", - "comments": "" + "type": "int", + "name": "GIT_INDEX_STAGE_ANCESTOR", + "comments": "

The ancestor side of a conflict.

\n", + "value": 1 }, { - "type": "uint16_t", - "name": "flags_extended", - "comments": "" + "type": "int", + "name": "GIT_INDEX_STAGE_OURS", + "comments": "

The "ours" side of a conflict.

\n", + "value": 2 }, { - "type": "const char *", - "name": "path", - "comments": "" + "type": "int", + "name": "GIT_INDEX_STAGE_THEIRS", + "comments": "

The "theirs" side of a conflict.

\n", + "value": 3 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_index_time", + { + "decl": ["int32_t seconds", "uint32_t nanoseconds"], + "type": "struct", + "value": "git_index_time", + "file": "git2/index.h", + "line": 31, + "lineto": 35, + "block": "int32_t seconds\nuint32_t nanoseconds", + "tdef": "typedef", + "description": " Time structure used in a git index entry ", + "comments": "", + "fields": [ + { "type": "int32_t", "name": "seconds", "comments": "" }, + { "type": "uint32_t", "name": "nanoseconds", "comments": "" } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_indexer", + { + "decl": "git_indexer", + "type": "struct", + "value": "git_indexer", + "file": "git2/indexer.h", + "line": 27, + "lineto": 27, + "tdef": "typedef", + "description": " A git indexer object ", + "comments": "", "used": { - "returns": [ - "git_index_get_byindex", - "git_index_get_bypath" - ], + "returns": ["git_remote_stats"], "needs": [ - "git_index_add", - "git_index_add_frombuffer", - "git_index_conflict_add", - "git_index_conflict_get", - "git_index_conflict_next", - "git_index_entry_is_conflict", - "git_index_entry_stage", - "git_merge_file_from_index" + "git_indexer_append", + "git_indexer_commit", + "git_indexer_free", + "git_indexer_hash", + "git_indexer_name", + "git_indexer_new", + "git_indexer_options_init", + "git_indexer_progress_cb", + "git_odb_write_pack", + "git_packbuilder_write" ] } } ], [ - "git_index_time", + "git_indexer_options", { "decl": [ - "int32_t seconds", - "uint32_t nanoseconds" + "unsigned int version", + "git_indexer_progress_cb progress_cb", + "void * progress_cb_payload", + "unsigned char verify" ], "type": "struct", - "value": "git_index_time", - "file": "index.h", - "line": 26, - "lineto": 30, - "block": "int32_t seconds\nuint32_t nanoseconds", + "value": "git_indexer_options", + "file": "git2/indexer.h", + "line": 73, + "lineto": 100, + "block": "unsigned int version\ngit_indexer_progress_cb progress_cb\nvoid 
* progress_cb_payload\nunsigned char verify", "tdef": "typedef", - "description": " Time structure used in a git index entry ", + "description": " Options for indexer configuration", "comments": "", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { - "type": "int32_t", - "name": "seconds", - "comments": "" + "type": "git_indexer_progress_cb", + "name": "progress_cb", + "comments": " progress_cb function to call with progress information " }, { - "type": "uint32_t", - "name": "nanoseconds", - "comments": "" + "type": "void *", + "name": "progress_cb_payload", + "comments": " progress_cb_payload payload for the progress callback " + }, + { + "type": "unsigned char", + "name": "verify", + "comments": " Do connectivity checks for the received pack " } ], "used": { "returns": [], - "needs": [] + "needs": ["git_indexer_new", "git_indexer_options_init"] } } ], [ - "git_indexcap_t", + "git_indexer_progress", { "decl": [ - "GIT_INDEXCAP_IGNORE_CASE", - "GIT_INDEXCAP_NO_FILEMODE", - "GIT_INDEXCAP_NO_SYMLINKS", - "GIT_INDEXCAP_FROM_OWNER" + "unsigned int total_objects", + "unsigned int indexed_objects", + "unsigned int received_objects", + "unsigned int local_objects", + "unsigned int total_deltas", + "unsigned int indexed_deltas", + "size_t received_bytes" ], - "type": "enum", - "file": "index.h", - "line": 138, - "lineto": 143, - "block": "GIT_INDEXCAP_IGNORE_CASE\nGIT_INDEXCAP_NO_FILEMODE\nGIT_INDEXCAP_NO_SYMLINKS\nGIT_INDEXCAP_FROM_OWNER", + "type": "struct", + "value": "git_indexer_progress", + "file": "git2/indexer.h", + "line": 34, + "lineto": 58, + "block": "unsigned int total_objects\nunsigned int indexed_objects\nunsigned int received_objects\nunsigned int local_objects\nunsigned int total_deltas\nunsigned int indexed_deltas\nsize_t received_bytes", "tdef": "typedef", - "description": " Capabilities of system that affect index actions. 
", + "description": " This structure is used to provide callers information about the\n progress of indexing a packfile, either directly or part of a\n fetch or clone that downloads a packfile.", "comments": "", "fields": [ { - "type": "int", - "name": "GIT_INDEXCAP_IGNORE_CASE", - "comments": "", - "value": 1 + "type": "unsigned int", + "name": "total_objects", + "comments": " number of objects in the packfile being indexed " }, { - "type": "int", - "name": "GIT_INDEXCAP_NO_FILEMODE", - "comments": "", - "value": 2 + "type": "unsigned int", + "name": "indexed_objects", + "comments": " received objects that have been hashed " }, { - "type": "int", - "name": "GIT_INDEXCAP_NO_SYMLINKS", - "comments": "", - "value": 4 + "type": "unsigned int", + "name": "received_objects", + "comments": " received_objects: objects which have been downloaded " }, { - "type": "int", - "name": "GIT_INDEXCAP_FROM_OWNER", - "comments": "", - "value": -1 - } - ], - "used": { - "returns": [], - "needs": [] - } - } - ], - [ - "git_indxentry_flag_t", - { - "decl": [ - "GIT_IDXENTRY_EXTENDED", - "GIT_IDXENTRY_VALID" - ], - "type": "enum", - "file": "index.h", - "line": 86, - "lineto": 89, - "block": "GIT_IDXENTRY_EXTENDED\nGIT_IDXENTRY_VALID", - "tdef": "typedef", - "description": " Flags for index entries", - "comments": "", - "fields": [ + "type": "unsigned int", + "name": "local_objects", + "comments": " locally-available objects that have been injected in order\n to fix a thin pack" + }, { - "type": "int", - "name": "GIT_IDXENTRY_EXTENDED", - "comments": "", - "value": 16384 + "type": "unsigned int", + "name": "total_deltas", + "comments": " number of deltas in the packfile being indexed " }, { - "type": "int", - "name": "GIT_IDXENTRY_VALID", - "comments": "", - "value": 32768 + "type": "unsigned int", + "name": "indexed_deltas", + "comments": " received deltas that have been indexed " + }, + { + "type": "size_t", + "name": "received_bytes", + "comments": " size of the packfile received up 
to now " } ], "used": { - "returns": [], - "needs": [] + "returns": ["git_remote_stats"], + "needs": [ + "git_indexer_append", + "git_indexer_commit", + "git_indexer_progress_cb", + "git_odb_write_pack", + "git_packbuilder_write" + ] } } ], @@ -31975,13 +30836,36 @@ "GIT_OPT_ENABLE_FSYNC_GITDIR", "GIT_OPT_GET_WINDOWS_SHAREMODE", "GIT_OPT_SET_WINDOWS_SHAREMODE", - "GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION" + "GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION", + "GIT_OPT_SET_ALLOCATOR", + "GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY", + "GIT_OPT_GET_PACK_MAX_OBJECTS", + "GIT_OPT_SET_PACK_MAX_OBJECTS", + "GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS", + "GIT_OPT_ENABLE_HTTP_EXPECT_CONTINUE", + "GIT_OPT_GET_MWINDOW_FILE_LIMIT", + "GIT_OPT_SET_MWINDOW_FILE_LIMIT", + "GIT_OPT_SET_ODB_PACKED_PRIORITY", + "GIT_OPT_SET_ODB_LOOSE_PRIORITY", + "GIT_OPT_GET_EXTENSIONS", + "GIT_OPT_SET_EXTENSIONS", + "GIT_OPT_GET_OWNER_VALIDATION", + "GIT_OPT_SET_OWNER_VALIDATION", + "GIT_OPT_GET_HOMEDIR", + "GIT_OPT_SET_HOMEDIR", + "GIT_OPT_SET_SERVER_CONNECT_TIMEOUT", + "GIT_OPT_GET_SERVER_CONNECT_TIMEOUT", + "GIT_OPT_SET_SERVER_TIMEOUT", + "GIT_OPT_GET_SERVER_TIMEOUT", + "GIT_OPT_SET_USER_AGENT_PRODUCT", + "GIT_OPT_GET_USER_AGENT_PRODUCT", + "GIT_OPT_ADD_SSL_X509_CERT" ], "type": "enum", - "file": "common.h", - "line": 162, - "lineto": 186, - "block": 
"GIT_OPT_GET_MWINDOW_SIZE\nGIT_OPT_SET_MWINDOW_SIZE\nGIT_OPT_GET_MWINDOW_MAPPED_LIMIT\nGIT_OPT_SET_MWINDOW_MAPPED_LIMIT\nGIT_OPT_GET_SEARCH_PATH\nGIT_OPT_SET_SEARCH_PATH\nGIT_OPT_SET_CACHE_OBJECT_LIMIT\nGIT_OPT_SET_CACHE_MAX_SIZE\nGIT_OPT_ENABLE_CACHING\nGIT_OPT_GET_CACHED_MEMORY\nGIT_OPT_GET_TEMPLATE_PATH\nGIT_OPT_SET_TEMPLATE_PATH\nGIT_OPT_SET_SSL_CERT_LOCATIONS\nGIT_OPT_SET_USER_AGENT\nGIT_OPT_ENABLE_STRICT_OBJECT_CREATION\nGIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION\nGIT_OPT_SET_SSL_CIPHERS\nGIT_OPT_GET_USER_AGENT\nGIT_OPT_ENABLE_OFS_DELTA\nGIT_OPT_ENABLE_FSYNC_GITDIR\nGIT_OPT_GET_WINDOWS_SHAREMODE\nGIT_OPT_SET_WINDOWS_SHAREMODE\nGIT_OPT_ENABLE_STRICT_HASH_VERIFICATION", + "file": "git2/common.h", + "line": 214, + "lineto": 261, + "block": "GIT_OPT_GET_MWINDOW_SIZE\nGIT_OPT_SET_MWINDOW_SIZE\nGIT_OPT_GET_MWINDOW_MAPPED_LIMIT\nGIT_OPT_SET_MWINDOW_MAPPED_LIMIT\nGIT_OPT_GET_SEARCH_PATH\nGIT_OPT_SET_SEARCH_PATH\nGIT_OPT_SET_CACHE_OBJECT_LIMIT\nGIT_OPT_SET_CACHE_MAX_SIZE\nGIT_OPT_ENABLE_CACHING\nGIT_OPT_GET_CACHED_MEMORY\nGIT_OPT_GET_TEMPLATE_PATH\nGIT_OPT_SET_TEMPLATE_PATH\nGIT_OPT_SET_SSL_CERT_LOCATIONS\nGIT_OPT_SET_USER_AGENT\nGIT_OPT_ENABLE_STRICT_OBJECT_CREATION\nGIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION\nGIT_OPT_SET_SSL_CIPHERS\nGIT_OPT_GET_USER_AGENT\nGIT_OPT_ENABLE_OFS_DELTA\nGIT_OPT_ENABLE_FSYNC_GITDIR\nGIT_OPT_GET_WINDOWS_SHAREMODE\nGIT_OPT_SET_WINDOWS_SHAREMODE\nGIT_OPT_ENABLE_STRICT_HASH_VERIFICATION\nGIT_OPT_SET_ALLOCATOR\nGIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY\nGIT_OPT_GET_PACK_MAX_OBJECTS\nGIT_OPT_SET_PACK_MAX_OBJECTS\nGIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS\nGIT_OPT_ENABLE_HTTP_EXPECT_CONTINUE\nGIT_OPT_GET_MWINDOW_FILE_LIMIT\nGIT_OPT_SET_MWINDOW_FILE_LIMIT\nGIT_OPT_SET_ODB_PACKED_PRIORITY\nGIT_OPT_SET_ODB_LOOSE_PRIORITY\nGIT_OPT_GET_EXTENSIONS\nGIT_OPT_SET_EXTENSIONS\nGIT_OPT_GET_OWNER_VALIDATION\nGIT_OPT_SET_OWNER_VALIDATION\nGIT_OPT_GET_HOMEDIR\nGIT_OPT_SET_HOMEDIR\nGIT_OPT_SET_SERVER_CONNECT_TIMEOUT\nGIT_OPT_GET_SERVER_CONNECT_TIMEOUT\nGIT_OPT_SET_SERV
ER_TIMEOUT\nGIT_OPT_GET_SERVER_TIMEOUT\nGIT_OPT_SET_USER_AGENT_PRODUCT\nGIT_OPT_GET_USER_AGENT_PRODUCT\nGIT_OPT_ADD_SSL_X509_CERT", "tdef": "typedef", "description": " Global library options", "comments": "

These are used to select which global option to set or get and are used in git_libgit2_opts().

\n", @@ -32123,11 +31007,174 @@ "name": "GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION", "comments": "", "value": 22 + }, + { + "type": "int", + "name": "GIT_OPT_SET_ALLOCATOR", + "comments": "", + "value": 23 + }, + { + "type": "int", + "name": "GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY", + "comments": "", + "value": 24 + }, + { + "type": "int", + "name": "GIT_OPT_GET_PACK_MAX_OBJECTS", + "comments": "", + "value": 25 + }, + { + "type": "int", + "name": "GIT_OPT_SET_PACK_MAX_OBJECTS", + "comments": "", + "value": 26 + }, + { + "type": "int", + "name": "GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS", + "comments": "", + "value": 27 + }, + { + "type": "int", + "name": "GIT_OPT_ENABLE_HTTP_EXPECT_CONTINUE", + "comments": "", + "value": 28 + }, + { + "type": "int", + "name": "GIT_OPT_GET_MWINDOW_FILE_LIMIT", + "comments": "", + "value": 29 + }, + { + "type": "int", + "name": "GIT_OPT_SET_MWINDOW_FILE_LIMIT", + "comments": "", + "value": 30 + }, + { + "type": "int", + "name": "GIT_OPT_SET_ODB_PACKED_PRIORITY", + "comments": "", + "value": 31 + }, + { + "type": "int", + "name": "GIT_OPT_SET_ODB_LOOSE_PRIORITY", + "comments": "", + "value": 32 + }, + { + "type": "int", + "name": "GIT_OPT_GET_EXTENSIONS", + "comments": "", + "value": 33 + }, + { + "type": "int", + "name": "GIT_OPT_SET_EXTENSIONS", + "comments": "", + "value": 34 + }, + { + "type": "int", + "name": "GIT_OPT_GET_OWNER_VALIDATION", + "comments": "", + "value": 35 + }, + { + "type": "int", + "name": "GIT_OPT_SET_OWNER_VALIDATION", + "comments": "", + "value": 36 + }, + { + "type": "int", + "name": "GIT_OPT_GET_HOMEDIR", + "comments": "", + "value": 37 + }, + { + "type": "int", + "name": "GIT_OPT_SET_HOMEDIR", + "comments": "", + "value": 38 + }, + { + "type": "int", + "name": "GIT_OPT_SET_SERVER_CONNECT_TIMEOUT", + "comments": "", + "value": 39 + }, + { + "type": "int", + "name": "GIT_OPT_GET_SERVER_CONNECT_TIMEOUT", + "comments": "", + "value": 40 + }, + { + "type": "int", + "name": "GIT_OPT_SET_SERVER_TIMEOUT", + "comments": 
"", + "value": 41 + }, + { + "type": "int", + "name": "GIT_OPT_GET_SERVER_TIMEOUT", + "comments": "", + "value": 42 + }, + { + "type": "int", + "name": "GIT_OPT_SET_USER_AGENT_PRODUCT", + "comments": "", + "value": 43 + }, + { + "type": "int", + "name": "GIT_OPT_GET_USER_AGENT_PRODUCT", + "comments": "", + "value": 44 + }, + { + "type": "int", + "name": "GIT_OPT_ADD_SSL_X509_CERT", + "comments": "", + "value": 45 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_mailmap", + { + "decl": "git_mailmap", + "type": "struct", + "value": "git_mailmap", + "file": "git2/types.h", + "line": 382, + "lineto": 382, + "tdef": "typedef", + "description": " Representation of .mailmap file state. ", + "comments": "", "used": { "returns": [], - "needs": [] + "needs": [ + "git_commit_author_with_mailmap", + "git_commit_committer_with_mailmap", + "git_mailmap_add_entry", + "git_mailmap_free", + "git_mailmap_from_buffer", + "git_mailmap_from_repository", + "git_mailmap_new", + "git_mailmap_resolve", + "git_mailmap_resolve_signature" + ] } } ], @@ -32142,9 +31189,9 @@ "GIT_MERGE_ANALYSIS_UNBORN" ], "type": "enum", - "file": "merge.h", - "line": 318, - "lineto": 347, + "file": "git2/merge.h", + "line": 347, + "lineto": 376, "block": "GIT_MERGE_ANALYSIS_NONE\nGIT_MERGE_ANALYSIS_NORMAL\nGIT_MERGE_ANALYSIS_UP_TO_DATE\nGIT_MERGE_ANALYSIS_FASTFORWARD\nGIT_MERGE_ANALYSIS_UNBORN", "tdef": "typedef", "description": " The results of `git_merge_analysis` indicate the merge opportunities.", @@ -32183,57 +31230,7 @@ ], "used": { "returns": [], - "needs": [ - "git_merge_analysis" - ] - } - } - ], - [ - "git_merge_driver", - { - "decl": [ - "unsigned int version", - "git_merge_driver_init_fn initialize", - "git_merge_driver_shutdown_fn shutdown", - "git_merge_driver_apply_fn apply" - ], - "type": "struct", - "value": "git_merge_driver", - "file": "sys/merge.h", - "line": 118, - "lineto": 135, - "block": "unsigned int version\ngit_merge_driver_init_fn 
initialize\ngit_merge_driver_shutdown_fn shutdown\ngit_merge_driver_apply_fn apply", - "tdef": null, - "description": " Merge driver structure used to register custom merge drivers.", - "comments": "

To associate extra data with a driver, allocate extra data and put the git_merge_driver struct at the start of your data buffer, then cast the self pointer to your larger structure when your callback is invoked.

\n", - "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": " The `version` should be set to `GIT_MERGE_DRIVER_VERSION`. " - }, - { - "type": "git_merge_driver_init_fn", - "name": "initialize", - "comments": " Called when the merge driver is first used for any file. " - }, - { - "type": "git_merge_driver_shutdown_fn", - "name": "shutdown", - "comments": " Called when the merge driver is unregistered from the system. " - }, - { - "type": "git_merge_driver_apply_fn", - "name": "apply", - "comments": " Called to merge the contents of a conflict. If this function\n returns `GIT_PASSTHROUGH` then the default (`text`) merge driver\n will instead be invoked. If this function returns\n `GIT_EMERGECONFLICT` then the file will remain conflicted." - } - ], - "used": { - "returns": [], - "needs": [ - "git_merge_driver_apply_fn" - ] + "needs": ["git_merge_analysis", "git_merge_analysis_for_ref"] } } ], @@ -32243,18 +31240,13 @@ "decl": "git_merge_driver_source", "type": "struct", "value": "git_merge_driver_source", - "file": "sys/merge.h", - "line": 36, - "lineto": 36, + "file": "git2/sys/merge.h", + "line": 49, + "lineto": 49, "tdef": "typedef", "description": " A merge driver source represents the file to be merged", "comments": "", - "used": { - "returns": [], - "needs": [ - "git_merge_driver_apply_fn" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -32267,9 +31259,9 @@ "GIT_MERGE_FILE_FAVOR_UNION" ], "type": "enum", - "file": "merge.h", - "line": 101, - "lineto": 131, + "file": "git2/merge.h", + "line": 115, + "lineto": 145, "block": "GIT_MERGE_FILE_FAVOR_NORMAL\nGIT_MERGE_FILE_FAVOR_OURS\nGIT_MERGE_FILE_FAVOR_THEIRS\nGIT_MERGE_FILE_FAVOR_UNION", "tdef": "typedef", "description": " Merge file favor options for `git_merge_options` instruct the file-level\n merging functionality how to deal with conflicting regions of the files.", @@ -32300,10 +31292,7 @@ "value": 3 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": 
[], "needs": [] } } ], [ @@ -32318,13 +31307,15 @@ "GIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE", "GIT_MERGE_FILE_IGNORE_WHITESPACE_EOL", "GIT_MERGE_FILE_DIFF_PATIENCE", - "GIT_MERGE_FILE_DIFF_MINIMAL" + "GIT_MERGE_FILE_DIFF_MINIMAL", + "GIT_MERGE_FILE_STYLE_ZDIFF3", + "GIT_MERGE_FILE_ACCEPT_CONFLICTS" ], "type": "enum", - "file": "merge.h", - "line": 136, - "lineto": 163, - "block": "GIT_MERGE_FILE_DEFAULT\nGIT_MERGE_FILE_STYLE_MERGE\nGIT_MERGE_FILE_STYLE_DIFF3\nGIT_MERGE_FILE_SIMPLIFY_ALNUM\nGIT_MERGE_FILE_IGNORE_WHITESPACE\nGIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE\nGIT_MERGE_FILE_IGNORE_WHITESPACE_EOL\nGIT_MERGE_FILE_DIFF_PATIENCE\nGIT_MERGE_FILE_DIFF_MINIMAL", + "file": "git2/merge.h", + "line": 150, + "lineto": 187, + "block": "GIT_MERGE_FILE_DEFAULT\nGIT_MERGE_FILE_STYLE_MERGE\nGIT_MERGE_FILE_STYLE_DIFF3\nGIT_MERGE_FILE_SIMPLIFY_ALNUM\nGIT_MERGE_FILE_IGNORE_WHITESPACE\nGIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE\nGIT_MERGE_FILE_IGNORE_WHITESPACE_EOL\nGIT_MERGE_FILE_DIFF_PATIENCE\nGIT_MERGE_FILE_DIFF_MINIMAL\nGIT_MERGE_FILE_STYLE_ZDIFF3\nGIT_MERGE_FILE_ACCEPT_CONFLICTS", "tdef": "typedef", "description": " File merging flags", "comments": "", @@ -32382,12 +31373,21 @@ "name": "GIT_MERGE_FILE_DIFF_MINIMAL", "comments": "

Take extra time to find minimal diff

\n", "value": 128 + }, + { + "type": "int", + "name": "GIT_MERGE_FILE_STYLE_ZDIFF3", + "comments": "

Create zdiff3 ("zealous diff3")-style files

\n", + "value": 256 + }, + { + "type": "int", + "name": "GIT_MERGE_FILE_ACCEPT_CONFLICTS", + "comments": "

Do not produce file conflicts when common regions have\n changed; keep the conflict markers in the file and accept\n that as the merge result.

\n", + "value": 512 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -32402,19 +31402,15 @@ ], "type": "struct", "value": "git_merge_file_input", - "file": "merge.h", - "line": 32, - "lineto": 46, + "file": "git2/merge.h", + "line": 35, + "lineto": 49, "block": "unsigned int version\nconst char * ptr\nsize_t size\nconst char * path\nunsigned int mode", "tdef": "typedef", "description": " The file inputs to `git_merge_file`. Callers should populate the\n `git_merge_file_input` structure with descriptions of the files in\n each side of the conflict for use in producing the merge file.", "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "const char *", "name": "ptr", @@ -32438,10 +31434,7 @@ ], "used": { "returns": [], - "needs": [ - "git_merge_file", - "git_merge_file_init_input" - ] + "needs": ["git_merge_file", "git_merge_file_input_init"] } } ], @@ -32454,24 +31447,20 @@ "const char * our_label", "const char * their_label", "git_merge_file_favor_t favor", - "git_merge_file_flag_t flags", + "uint32_t flags", "unsigned short marker_size" ], "type": "struct", "value": "git_merge_file_options", - "file": "merge.h", - "line": 170, - "lineto": 200, - "block": "unsigned int version\nconst char * ancestor_label\nconst char * our_label\nconst char * their_label\ngit_merge_file_favor_t favor\ngit_merge_file_flag_t flags\nunsigned short marker_size", + "file": "git2/merge.h", + "line": 195, + "lineto": 225, + "block": "unsigned int version\nconst char * ancestor_label\nconst char * our_label\nconst char * their_label\ngit_merge_file_favor_t favor\nuint32_t flags\nunsigned short marker_size", "tdef": "typedef", "description": " Options for merging a file", "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": 
"version", "comments": "" }, { "type": "const char *", "name": "ancestor_label", @@ -32493,7 +31482,7 @@ "comments": " The file to favor in region conflicts. " }, { - "type": "git_merge_file_flag_t", + "type": "uint32_t", "name": "flags", "comments": " see `git_merge_file_flag_t` above " }, @@ -32508,7 +31497,7 @@ "needs": [ "git_merge_file", "git_merge_file_from_index", - "git_merge_file_init_options" + "git_merge_file_options_init" ] } } @@ -32525,9 +31514,9 @@ ], "type": "struct", "value": "git_merge_file_result", - "file": "merge.h", - "line": 221, - "lineto": 242, + "file": "git2/merge.h", + "line": 248, + "lineto": 269, "block": "unsigned int automergeable\nconst char * path\nunsigned int mode\nconst char * ptr\nsize_t len", "tdef": "typedef", "description": " Information about file-level merging", @@ -32576,13 +31565,14 @@ "GIT_MERGE_FIND_RENAMES", "GIT_MERGE_FAIL_ON_CONFLICT", "GIT_MERGE_SKIP_REUC", - "GIT_MERGE_NO_RECURSIVE" + "GIT_MERGE_NO_RECURSIVE", + "GIT_MERGE_VIRTUAL_BASE" ], "type": "enum", - "file": "merge.h", - "line": 68, - "lineto": 95, - "block": "GIT_MERGE_FIND_RENAMES\nGIT_MERGE_FAIL_ON_CONFLICT\nGIT_MERGE_SKIP_REUC\nGIT_MERGE_NO_RECURSIVE", + "file": "git2/merge.h", + "line": 74, + "lineto": 109, + "block": "GIT_MERGE_FIND_RENAMES\nGIT_MERGE_FAIL_ON_CONFLICT\nGIT_MERGE_SKIP_REUC\nGIT_MERGE_NO_RECURSIVE\nGIT_MERGE_VIRTUAL_BASE", "tdef": "typedef", "description": " Flags for `git_merge` options. A combination of these flags can be\n passed in via the `flags` value in the `git_merge_options`.", "comments": "", @@ -32610,12 +31600,15 @@ "name": "GIT_MERGE_NO_RECURSIVE", "comments": "

If the commits being merged have multiple merge bases, do not build\n a recursive merge base (by merging the multiple merge bases),\n instead simply use the first base. This flag provides a similar\n merge base to git-merge-resolve.

\n", "value": 8 + }, + { + "type": "int", + "name": "GIT_MERGE_VIRTUAL_BASE", + "comments": "

Treat this merge as if it is to produce the virtual base\n of a recursive merge. This will ensure that there are\n no conflicts, any conflicting regions will keep conflict\n markers in the merge result.

\n", + "value": 16 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -32623,32 +31616,28 @@ { "decl": [ "unsigned int version", - "git_merge_flag_t flags", + "uint32_t flags", "unsigned int rename_threshold", "unsigned int target_limit", "git_diff_similarity_metric * metric", "unsigned int recursion_limit", "const char * default_driver", "git_merge_file_favor_t file_favor", - "git_merge_file_flag_t file_flags" + "uint32_t file_flags" ], "type": "struct", "value": "git_merge_options", - "file": "merge.h", - "line": 247, - "lineto": 296, - "block": "unsigned int version\ngit_merge_flag_t flags\nunsigned int rename_threshold\nunsigned int target_limit\ngit_diff_similarity_metric * metric\nunsigned int recursion_limit\nconst char * default_driver\ngit_merge_file_favor_t file_favor\ngit_merge_file_flag_t file_flags", + "file": "git2/merge.h", + "line": 274, + "lineto": 323, + "block": "unsigned int version\nuint32_t flags\nunsigned int rename_threshold\nunsigned int target_limit\ngit_diff_similarity_metric * metric\nunsigned int recursion_limit\nconst char * default_driver\ngit_merge_file_favor_t file_favor\nuint32_t file_flags", "tdef": "typedef", "description": " Merging options", "comments": "", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { - "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "git_merge_flag_t", + "type": "uint32_t", "name": "flags", "comments": " See `git_merge_flag_t` above " }, @@ -32683,7 +31672,7 @@ "comments": " Flags for handling conflicting content, to be used with the standard\n (`text`) merge driver." 
}, { - "type": "git_merge_file_flag_t", + "type": "uint32_t", "name": "file_flags", "comments": " see `git_merge_file_flag_t` above " } @@ -32694,7 +31683,7 @@ "git_cherrypick_commit", "git_merge", "git_merge_commits", - "git_merge_init_options", + "git_merge_options_init", "git_merge_trees", "git_revert_commit" ] @@ -32710,9 +31699,9 @@ "GIT_MERGE_PREFERENCE_FASTFORWARD_ONLY" ], "type": "enum", - "file": "merge.h", - "line": 352, - "lineto": 370, + "file": "git2/merge.h", + "line": 381, + "lineto": 399, "block": "GIT_MERGE_PREFERENCE_NONE\nGIT_MERGE_PREFERENCE_NO_FASTFORWARD\nGIT_MERGE_PREFERENCE_FASTFORWARD_ONLY", "tdef": "typedef", "description": " The user's stated preference for merges.", @@ -32739,40 +31728,17 @@ ], "used": { "returns": [], - "needs": [ - "git_merge_analysis" - ] - } - } - ], - [ - "git_merge_result", - { - "decl": "git_merge_result", - "type": "struct", - "value": "git_merge_result", - "file": "types.h", - "line": 185, - "lineto": 185, - "tdef": "typedef", - "description": " Merge result ", - "comments": "", - "used": { - "returns": [], - "needs": [] + "needs": ["git_merge_analysis", "git_merge_analysis_for_ref"] } } ], [ "git_message_trailer", { - "decl": [ - "const char * key", - "const char * value" - ], + "decl": ["const char * key", "const char * value"], "type": "struct", "value": "git_message_trailer", - "file": "message.h", + "file": "git2/message.h", "line": 43, "lineto": 46, "block": "const char * key\nconst char * value", @@ -32780,23 +31746,12 @@ "description": " Represents a single git message trailer.", "comments": "", "fields": [ - { - "type": "const char *", - "name": "key", - "comments": "" - }, - { - "type": "const char *", - "name": "value", - "comments": "" - } + { "type": "const char *", "name": "key", "comments": "" }, + { "type": "const char *", "name": "value", "comments": "" } ], "used": { "returns": [], - "needs": [ - "git_message_trailer_array_free", - "git_message_trailers" - ] + "needs": 
["git_message_trailer_array_free", "git_message_trailers"] } } ], @@ -32810,7 +31765,7 @@ ], "type": "struct", "value": "git_message_trailer_array", - "file": "message.h", + "file": "git2/message.h", "line": 54, "lineto": 60, "block": "git_message_trailer * trailers\nsize_t count\nchar * _trailer_block", @@ -32823,54 +31778,45 @@ "name": "trailers", "comments": "" }, - { - "type": "size_t", - "name": "count", - "comments": "" - }, - { - "type": "char *", - "name": "_trailer_block", - "comments": "" - } + { "type": "size_t", "name": "count", "comments": "" }, + { "type": "char *", "name": "_trailer_block", "comments": "" } ], "used": { "returns": [], - "needs": [ - "git_message_trailer_array_free", - "git_message_trailers" - ] + "needs": ["git_message_trailer_array_free", "git_message_trailers"] } } ], + [ + "git_midx_writer", + { + "decl": "git_midx_writer", + "type": "struct", + "value": "git_midx_writer", + "file": "git2/types.h", + "line": 105, + "lineto": 105, + "tdef": "typedef", + "description": " a writer for multi-pack-index files. 
", + "comments": "", + "used": { "returns": [], "needs": [] } + } + ], [ "git_note", { "decl": "git_note", "type": "struct", "value": "git_note", - "file": "types.h", - "line": 153, - "lineto": 153, + "file": "git2/types.h", + "line": 174, + "lineto": 174, "tdef": "typedef", "description": " Representation of a git note ", "comments": "", "used": { "returns": [], - "needs": [ - "git_note_author", - "git_note_commit_iterator_new", - "git_note_commit_read", - "git_note_committer", - "git_note_foreach", - "git_note_free", - "git_note_id", - "git_note_iterator_free", - "git_note_iterator_new", - "git_note_message", - "git_note_next", - "git_note_read" - ] + "needs": ["git_note_iterator_free", "git_note_next"] } } ], @@ -32880,20 +31826,15 @@ "decl": "git_note_iterator", "type": "struct", "value": "git_note_iterator", - "file": "notes.h", - "line": 35, - "lineto": 35, + "file": "git2/notes.h", + "line": 37, + "lineto": 37, "tdef": "typedef", "description": " note iterator", "comments": "", "used": { "returns": [], - "needs": [ - "git_note_commit_iterator_new", - "git_note_iterator_free", - "git_note_iterator_new", - "git_note_next" - ] + "needs": ["git_note_iterator_free", "git_note_next"] } } ], @@ -32903,17 +31844,25 @@ "decl": "git_object", "type": "struct", "value": "git_object", - "file": "types.h", - "line": 111, - "lineto": 111, + "file": "git2/types.h", + "line": 129, + "lineto": 129, "tdef": "typedef", "description": " Representation of a generic object in a repository ", "comments": "", "used": { - "returns": [], + "returns": [ + "git_blob_rawsize", + "git_object_string2type", + "git_object_type", + "git_odb_object_type", + "git_tag_target_type", + "git_tree_entry_type" + ], "needs": [ "git_checkout_tree", "git_describe_commit", + "git_object__size", "git_object_dup", "git_object_free", "git_object_id", @@ -32922,9 +31871,19 @@ "git_object_lookup_prefix", "git_object_owner", "git_object_peel", + "git_object_rawcontent_is_valid", "git_object_short_id", 
"git_object_type", + "git_object_type2string", + "git_object_typeisloose", + "git_odb_hash", + "git_odb_hashfile", + "git_odb_open_rstream", + "git_odb_open_wstream", + "git_odb_read_header", + "git_odb_write", "git_reference_peel", + "git_repository_hashfile", "git_reset", "git_reset_default", "git_revparse_ext", @@ -32939,25 +31898,122 @@ } } ], + [ + "git_object_t", + { + "decl": [ + "GIT_OBJECT_ANY", + "GIT_OBJECT_INVALID", + "GIT_OBJECT_COMMIT", + "GIT_OBJECT_TREE", + "GIT_OBJECT_BLOB", + "GIT_OBJECT_TAG", + "GIT_OBJECT_OFS_DELTA", + "GIT_OBJECT_REF_DELTA" + ], + "type": "enum", + "file": "git2/types.h", + "line": 73, + "lineto": 82, + "block": "GIT_OBJECT_ANY\nGIT_OBJECT_INVALID\nGIT_OBJECT_COMMIT\nGIT_OBJECT_TREE\nGIT_OBJECT_BLOB\nGIT_OBJECT_TAG\nGIT_OBJECT_OFS_DELTA\nGIT_OBJECT_REF_DELTA", + "tdef": "typedef", + "description": " Basic type (loose or packed) of any Git object. ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_OBJECT_ANY", + "comments": "

Object can be any of the following

\n", + "value": -2 + }, + { + "type": "int", + "name": "GIT_OBJECT_INVALID", + "comments": "

Object is invalid.

\n", + "value": -1 + }, + { + "type": "int", + "name": "GIT_OBJECT_COMMIT", + "comments": "

A commit object.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_OBJECT_TREE", + "comments": "

A tree (directory listing) object.

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_OBJECT_BLOB", + "comments": "

A file revision object.

\n", + "value": 3 + }, + { + "type": "int", + "name": "GIT_OBJECT_TAG", + "comments": "

An annotated tag object.

\n", + "value": 4 + }, + { + "type": "int", + "name": "GIT_OBJECT_OFS_DELTA", + "comments": "

A delta, base is given by an offset.

\n", + "value": 6 + }, + { + "type": "int", + "name": "GIT_OBJECT_REF_DELTA", + "comments": "

A delta, base is given by object id.

\n", + "value": 7 + } + ], + "used": { + "returns": [ + "git_object_string2type", + "git_object_type", + "git_odb_object_type", + "git_tag_target_type", + "git_tree_entry_type" + ], + "needs": [ + "git_object__size", + "git_object_lookup", + "git_object_lookup_bypath", + "git_object_lookup_prefix", + "git_object_peel", + "git_object_rawcontent_is_valid", + "git_object_type2string", + "git_object_typeisloose", + "git_odb_hash", + "git_odb_hashfile", + "git_odb_open_rstream", + "git_odb_open_wstream", + "git_odb_read_header", + "git_odb_write", + "git_reference_peel", + "git_repository_hashfile" + ] + } + } + ], [ "git_odb", { "decl": "git_odb", "type": "struct", "value": "git_odb", - "file": "types.h", - "line": 81, - "lineto": 81, + "file": "git2/types.h", + "line": 88, + "lineto": 88, "tdef": "typedef", - "description": " An open object database handle. ", + "description": " An object database stores the objects (commit, trees, blobs, tags,\n etc) for a repository.", "comments": "", "used": { "returns": [], "needs": [ "git_indexer_new", - "git_mempack_dump", - "git_mempack_new", - "git_mempack_reset", "git_odb_add_alternate", "git_odb_add_backend", "git_odb_add_disk_alternate", @@ -32965,12 +32021,12 @@ "git_odb_backend_one_pack", "git_odb_backend_pack", "git_odb_exists", + "git_odb_exists_ext", "git_odb_exists_prefix", "git_odb_expand_ids", "git_odb_foreach", "git_odb_free", "git_odb_get_backend", - "git_odb_init_backend", "git_odb_new", "git_odb_num_backends", "git_odb_object_data", @@ -32986,14 +32042,15 @@ "git_odb_read_header", "git_odb_read_prefix", "git_odb_refresh", + "git_odb_set_commit_graph", "git_odb_stream_finalize_write", "git_odb_stream_free", "git_odb_stream_read", "git_odb_stream_write", "git_odb_write", + "git_odb_write_multi_pack_index", "git_odb_write_pack", "git_repository_odb", - "git_repository_set_odb", "git_repository_wrap_odb" ] } @@ -33005,121 +32062,118 @@ "decl": "git_odb_backend", "type": "struct", "value": "git_odb_backend", - 
"file": "types.h", - "line": 84, - "lineto": 84, - "block": "unsigned int version\ngit_odb * odb\nint (*)(void **, size_t *, git_otype *, git_odb_backend *, const git_oid *) read\nint (*)(git_oid *, void **, size_t *, git_otype *, git_odb_backend *, const git_oid *, size_t) read_prefix\nint (*)(size_t *, git_otype *, git_odb_backend *, const git_oid *) read_header\nint (*)(git_odb_backend *, const git_oid *, const void *, size_t, git_otype) write\nint (*)(git_odb_stream **, git_odb_backend *, git_off_t, git_otype) writestream\nint (*)(git_odb_stream **, size_t *, git_otype *, git_odb_backend *, const git_oid *) readstream\nint (*)(git_odb_backend *, const git_oid *) exists\nint (*)(git_oid *, git_odb_backend *, const git_oid *, size_t) exists_prefix\nint (*)(git_odb_backend *) refresh\nint (*)(git_odb_backend *, git_odb_foreach_cb, void *) foreach\nint (*)(git_odb_writepack **, git_odb_backend *, git_odb *, git_transfer_progress_cb, void *) writepack\nint (*)(git_odb_backend *, const git_oid *) freshen\nvoid (*)(git_odb_backend *) free", + "file": "git2/types.h", + "line": 91, + "lineto": 91, "tdef": "typedef", "description": " A custom backend in an ODB ", "comments": "", + "used": { + "returns": [], + "needs": [ + "git_odb_add_alternate", + "git_odb_add_backend", + "git_odb_backend_loose", + "git_odb_backend_one_pack", + "git_odb_backend_pack", + "git_odb_get_backend" + ] + } + } + ], + [ + "git_odb_backend_loose_options", + { + "decl": [ + "unsigned int version", + "uint32_t flags", + "int compression_level", + "unsigned int dir_mode", + "unsigned int file_mode", + "git_oid_t oid_type" + ], + "type": "struct", + "value": "git_odb_backend_loose_options", + "file": "git2/odb_backend.h", + "line": 49, + "lineto": 75, + "block": "unsigned int version\nuint32_t flags\nint compression_level\nunsigned int dir_mode\nunsigned int file_mode\ngit_oid_t oid_type", + "tdef": "typedef", + "description": " Options for configuring a loose object backend. 
", + "comments": "", "fields": [ { "type": "unsigned int", "name": "version", - "comments": "" - }, - { - "type": "git_odb *", - "name": "odb", - "comments": "" - }, - { - "type": "int (*)(void **, size_t *, git_otype *, git_odb_backend *, const git_oid *)", - "name": "read", - "comments": "" - }, - { - "type": "int (*)(git_oid *, void **, size_t *, git_otype *, git_odb_backend *, const git_oid *, size_t)", - "name": "read_prefix", - "comments": "" - }, - { - "type": "int (*)(size_t *, git_otype *, git_odb_backend *, const git_oid *)", - "name": "read_header", - "comments": "" - }, - { - "type": "int (*)(git_odb_backend *, const git_oid *, const void *, size_t, git_otype)", - "name": "write", - "comments": " Write an object into the backend. The id of the object has\n already been calculated and is passed in." - }, - { - "type": "int (*)(git_odb_stream **, git_odb_backend *, git_off_t, git_otype)", - "name": "writestream", - "comments": "" - }, - { - "type": "int (*)(git_odb_stream **, size_t *, git_otype *, git_odb_backend *, const git_oid *)", - "name": "readstream", - "comments": "" + "comments": " version for the struct " }, { - "type": "int (*)(git_odb_backend *, const git_oid *)", - "name": "exists", - "comments": "" + "type": "uint32_t", + "name": "flags", + "comments": " A combination of the `git_odb_backend_loose_flag_t` types. " }, { - "type": "int (*)(git_oid *, git_odb_backend *, const git_oid *, size_t)", - "name": "exists_prefix", - "comments": "" + "type": "int", + "name": "compression_level", + "comments": " zlib compression level to use (0-9), where 1 is the fastest\n at the expense of larger files, and 9 produces the best\n compression at the expense of speed. 0 indicates that no\n compression should be performed. -1 is the default (currently\n optimizing for speed)." }, { - "type": "int (*)(git_odb_backend *)", - "name": "refresh", - "comments": " If the backend implements a refreshing mechanism, it should be exposed\n through this endpoint. 
Each call to `git_odb_refresh()` will invoke it.\n\n However, the backend implementation should try to stay up-to-date as much\n as possible by itself as libgit2 will not automatically invoke\n `git_odb_refresh()`. For instance, a potential strategy for the backend\n implementation to achieve this could be to internally invoke this\n endpoint on failed lookups (ie. `exists()`, `read()`, `read_header()`)." + "type": "unsigned int", + "name": "dir_mode", + "comments": " Permissions to use creating a directory or 0 for defaults " }, { - "type": "int (*)(git_odb_backend *, git_odb_foreach_cb, void *)", - "name": "foreach", - "comments": "" + "type": "unsigned int", + "name": "file_mode", + "comments": " Permissions to use creating a file or 0 for defaults " }, { - "type": "int (*)(git_odb_writepack **, git_odb_backend *, git_odb *, git_transfer_progress_cb, void *)", - "name": "writepack", - "comments": "" - }, + "type": "git_oid_t", + "name": "oid_type", + "comments": " Type of object IDs to use for this object database, or\n 0 for default (currently SHA1)." + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_odb_backend_pack_options", + { + "decl": ["unsigned int version", "git_oid_t oid_type"], + "type": "struct", + "value": "git_odb_backend_pack_options", + "file": "git2/odb_backend.h", + "line": 24, + "lineto": 32, + "block": "unsigned int version\ngit_oid_t oid_type", + "tdef": "typedef", + "description": " Options for configuring a packfile object backend. ", + "comments": "", + "fields": [ { - "type": "int (*)(git_odb_backend *, const git_oid *)", - "name": "freshen", - "comments": " \"Freshens\" an already existing object, updating its last-used\n time. This occurs when `git_odb_write` was called, but the\n object already existed (and will not be re-written). 
The\n underlying implementation may want to update last-used timestamps.\n\n If callers implement this, they should return `0` if the object\n exists and was freshened, and non-zero otherwise." + "type": "unsigned int", + "name": "version", + "comments": " version for the struct " }, { - "type": "void (*)(git_odb_backend *)", - "name": "free", - "comments": " Frees any resources held by the odb (including the `git_odb_backend`\n itself). An odb backend implementation must provide this function." + "type": "git_oid_t", + "name": "oid_type", + "comments": " Type of object IDs to use for this object database, or\n 0 for default (currently SHA1)." } ], - "used": { - "returns": [], - "needs": [ - "git_mempack_dump", - "git_mempack_new", - "git_mempack_reset", - "git_odb_add_alternate", - "git_odb_add_backend", - "git_odb_backend_loose", - "git_odb_backend_one_pack", - "git_odb_backend_pack", - "git_odb_get_backend", - "git_odb_init_backend" - ] - } + "used": { "returns": [], "needs": [] } } ], [ "git_odb_expand_id", { - "decl": [ - "git_oid id", - "unsigned short length", - "git_otype type" - ], + "decl": ["git_oid id", "unsigned short length", "git_object_t type"], "type": "struct", "value": "git_odb_expand_id", - "file": "odb.h", - "line": 180, - "lineto": 195, - "block": "git_oid id\nunsigned short length\ngit_otype type", + "file": "git2/odb.h", + "line": 250, + "lineto": 265, + "block": "git_oid id\nunsigned short length\ngit_object_t type", "tdef": "typedef", "description": " The information about object IDs to query in `git_odb_expand_ids`,\n which will be populated upon return.", "comments": "", @@ -33135,17 +32189,35 @@ "comments": " The length of the object ID (in nibbles, or packets of 4 bits; the\n number of hex characters)" }, { - "type": "git_otype", + "type": "git_object_t", "name": "type", - "comments": " The (optional) type of the object to search for; leave as `0` or set\n to `GIT_OBJ_ANY` to query for any object matching the ID." 
+ "comments": " The (optional) type of the object to search for; leave as `0` or set\n to `GIT_OBJECT_ANY` to query for any object matching the ID." } ], - "used": { - "returns": [], - "needs": [ - "git_odb_expand_ids" - ] - } + "used": { "returns": [], "needs": ["git_odb_expand_ids"] } + } + ], + [ + "git_odb_lookup_flags_t", + { + "decl": ["GIT_ODB_LOOKUP_NO_REFRESH"], + "type": "enum", + "file": "git2/odb.h", + "line": 26, + "lineto": 34, + "block": "GIT_ODB_LOOKUP_NO_REFRESH", + "tdef": "typedef", + "description": " Flags controlling the behavior of ODB lookup operations ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_ODB_LOOKUP_NO_REFRESH", + "comments": "

Don't call git_odb_refresh if the lookup fails. Useful when doing\n a batch of lookup operations for objects that may legitimately not\n exist. When using this flag, you may wish to manually call\n git_odb_refresh before processing a batch of objects.

\n", + "value": 1 + } + ], + "used": { "returns": [], "needs": [] } } ], [ @@ -33154,11 +32226,11 @@ "decl": "git_odb_object", "type": "struct", "value": "git_odb_object", - "file": "types.h", - "line": 87, - "lineto": 87, + "file": "git2/types.h", + "line": 96, + "lineto": 96, "tdef": "typedef", - "description": " An object read from the ODB ", + "description": " A \"raw\" object read from the object database.", "comments": "", "used": { "returns": [], @@ -33176,63 +32248,79 @@ } ], [ - "git_odb_stream", + "git_odb_options", { - "decl": "git_odb_stream", + "decl": ["unsigned int version", "git_oid_t oid_type"], "type": "struct", - "value": "git_odb_stream", - "file": "types.h", - "line": 90, - "lineto": 90, - "block": "git_odb_backend * backend\nunsigned int mode\nvoid * hash_ctx\ngit_off_t declared_size\ngit_off_t received_bytes\nint (*)(git_odb_stream *, char *, size_t) read\nint (*)(git_odb_stream *, const char *, size_t) write\nint (*)(git_odb_stream *, const int *) finalize_write\nvoid (*)(git_odb_stream *) free", + "value": "git_odb_options", + "file": "git2/odb.h", + "line": 46, + "lineto": 54, + "block": "unsigned int version\ngit_oid_t oid_type", "tdef": "typedef", - "description": " A stream to read/write from the ODB ", + "description": " Options for configuring a loose object backend. ", "comments": "", "fields": [ - { - "type": "git_odb_backend *", - "name": "backend", - "comments": "" - }, { "type": "unsigned int", - "name": "mode", - "comments": "" + "name": "version", + "comments": " version for the struct " }, { - "type": "void *", - "name": "hash_ctx", - "comments": "" - }, + "type": "git_oid_t", + "name": "oid_type", + "comments": " Type of object IDs to use for this object database, or\n 0 for default (currently SHA1)." 
+ } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_odb_stream", + { + "decl": "git_odb_stream", + "type": "struct", + "value": "git_odb_stream", + "file": "git2/types.h", + "line": 99, + "lineto": 99, + "block": "git_odb_backend * backend\nunsigned int mode\nvoid * hash_ctx\ngit_object_size_t declared_size\ngit_object_size_t received_bytes\nint (*)(git_odb_stream *, char *, size_t) read\nint (*)(git_odb_stream *, const char *, size_t) write\nint (*)(git_odb_stream *, const git_oid *) finalize_write\nvoid (*)(git_odb_stream *) free", + "tdef": "typedef", + "description": " A stream to read/write from the ODB ", + "comments": "", + "fields": [ + { "type": "git_odb_backend *", "name": "backend", "comments": "" }, + { "type": "unsigned int", "name": "mode", "comments": "" }, + { "type": "void *", "name": "hash_ctx", "comments": "" }, { - "type": "git_off_t", + "type": "git_object_size_t", "name": "declared_size", "comments": "" }, { - "type": "git_off_t", + "type": "git_object_size_t", "name": "received_bytes", "comments": "" }, { "type": "int (*)(git_odb_stream *, char *, size_t)", "name": "read", - "comments": " Write at most `len` bytes into `buffer` and advance the stream." + "comments": "" }, { "type": "int (*)(git_odb_stream *, const char *, size_t)", "name": "write", - "comments": " Write `len` bytes from `buffer` into the stream." 
+ "comments": "" }, { - "type": "int (*)(git_odb_stream *, const int *)", + "type": "int (*)(git_odb_stream *, const git_oid *)", "name": "finalize_write", - "comments": " Store the contents of the stream as an object with the id\n specified in `oid`.\n\n This method might not be invoked if:\n - an error occurs earlier with the `write` callback,\n - the object referred to by `oid` already exists in any backend, or\n - the final number of received bytes differs from the size declared\n with `git_odb_open_wstream()`" + "comments": "" }, { "type": "void (*)(git_odb_stream *)", "name": "free", - "comments": " Free the stream's memory.\n\n This method might be called without a call to `finalize_write` if\n an error occurs or if the object is already present in the ODB." + "comments": "" } ], "used": { @@ -33251,15 +32339,11 @@ [ "git_odb_stream_t", { - "decl": [ - "GIT_STREAM_RDONLY", - "GIT_STREAM_WRONLY", - "GIT_STREAM_RW" - ], + "decl": ["GIT_STREAM_RDONLY", "GIT_STREAM_WRONLY", "GIT_STREAM_RW"], "type": "enum", - "file": "odb_backend.h", - "line": 70, - "lineto": 74, + "file": "git2/odb_backend.h", + "line": 182, + "lineto": 186, "block": "GIT_STREAM_RDONLY\nGIT_STREAM_WRONLY\nGIT_STREAM_RW", "tdef": "typedef", "description": " Streaming mode ", @@ -33277,17 +32361,9 @@ "comments": "", "value": 4 }, - { - "type": "int", - "name": "GIT_STREAM_RW", - "comments": "", - "value": 6 - } + { "type": "int", "name": "GIT_STREAM_RW", "comments": "", "value": 6 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -33296,26 +32372,22 @@ "decl": "git_odb_writepack", "type": "struct", "value": "git_odb_writepack", - "file": "types.h", - "line": 93, - "lineto": 93, - "block": "git_odb_backend * backend\nint (*)(git_odb_writepack *, const void *, size_t, git_transfer_progress *) append\nint (*)(git_odb_writepack *, git_transfer_progress *) commit\nvoid (*)(git_odb_writepack *) free", + "file": "git2/types.h", + "line": 102, + 
"lineto": 102, + "block": "git_odb_backend * backend\nint (*)(git_odb_writepack *, const void *, size_t, git_indexer_progress *) append\nint (*)(git_odb_writepack *, git_indexer_progress *) commit\nvoid (*)(git_odb_writepack *) free", "tdef": "typedef", "description": " A stream to write a packfile to the ODB ", "comments": "", "fields": [ + { "type": "git_odb_backend *", "name": "backend", "comments": "" }, { - "type": "git_odb_backend *", - "name": "backend", - "comments": "" - }, - { - "type": "int (*)(git_odb_writepack *, const void *, size_t, git_transfer_progress *)", + "type": "int (*)(git_odb_writepack *, const void *, size_t, git_indexer_progress *)", "name": "append", "comments": "" }, { - "type": "int (*)(git_odb_writepack *, git_transfer_progress *)", + "type": "int (*)(git_odb_writepack *, git_indexer_progress *)", "name": "commit", "comments": "" }, @@ -33325,25 +32397,18 @@ "comments": "" } ], - "used": { - "returns": [], - "needs": [ - "git_odb_write_pack" - ] - } + "used": { "returns": [], "needs": ["git_odb_write_pack"] } } ], [ "git_oid", { - "decl": [ - "unsigned char [20] id" - ], + "decl": ["unsigned char [20] id"], "type": "struct", "value": "git_oid", - "file": "oid.h", - "line": 33, - "lineto": 36, + "file": "git2/oid.h", + "line": 103, + "lineto": 112, "block": "unsigned char [20] id", "tdef": "typedef", "description": " Unique identity of any object (commit, tree, blob, tag). 
", @@ -33362,18 +32427,19 @@ "git_commit_id", "git_commit_parent_id", "git_commit_tree_id", - "git_filter_source_id", "git_index_checksum", "git_indexer_hash", - "git_note_id", "git_object_id", "git_odb_object_id", "git_oid_shorten_new", "git_packbuilder_hash", + "git_rebase_onto_id", + "git_rebase_orig_head_id", "git_reference_target", "git_reference_target_peel", "git_reflog_entry_id_new", "git_reflog_entry_id_old", + "git_repository_oid_type", "git_submodule_head_id", "git_submodule_index_id", "git_submodule_wd_id", @@ -33385,16 +32451,16 @@ "needs": [ "git_annotated_commit_from_fetchhead", "git_annotated_commit_lookup", - "git_blob_create_frombuffer", - "git_blob_create_fromdisk", - "git_blob_create_fromstream_commit", - "git_blob_create_fromworkdir", + "git_blob_create_from_buffer", + "git_blob_create_from_disk", + "git_blob_create_from_stream_commit", + "git_blob_create_from_workdir", "git_blob_lookup", "git_blob_lookup_prefix", "git_commit_amend", "git_commit_create", - "git_commit_create_from_callback", - "git_commit_create_from_ids", + "git_commit_create_cb", + "git_commit_create_from_stage", "git_commit_create_v", "git_commit_create_with_signature", "git_commit_extract_signature", @@ -33403,6 +32469,7 @@ "git_diff_patchid", "git_graph_ahead_behind", "git_graph_descendant_of", + "git_graph_reachable_from_any", "git_index_write_tree", "git_index_write_tree_to", "git_merge_base", @@ -33410,17 +32477,12 @@ "git_merge_base_octopus", "git_merge_bases", "git_merge_bases_many", - "git_note_commit_create", - "git_note_commit_read", - "git_note_commit_remove", - "git_note_create", "git_note_foreach_cb", "git_note_next", - "git_note_read", - "git_note_remove", "git_object_lookup", "git_object_lookup_prefix", "git_odb_exists", + "git_odb_exists_ext", "git_odb_exists_prefix", "git_odb_foreach_cb", "git_odb_hash", @@ -33439,7 +32501,7 @@ "git_oid_fromstr", "git_oid_fromstrn", "git_oid_fromstrp", - "git_oid_iszero", + "git_oid_is_zero", "git_oid_ncmp", "git_oid_nfmt", 
"git_oid_pathfmt", @@ -33449,30 +32511,37 @@ "git_oid_streq", "git_oid_tostr", "git_oid_tostr_s", + "git_oidarray_dispose", "git_oidarray_free", "git_packbuilder_insert", "git_packbuilder_insert_commit", "git_packbuilder_insert_recur", "git_packbuilder_insert_tree", "git_rebase_commit", - "git_reference__alloc", "git_reference_create", "git_reference_create_matching", "git_reference_name_to_id", "git_reference_set_target", "git_reflog_append", + "git_repository_fetchhead_foreach_cb", "git_repository_hashfile", + "git_repository_mergehead_foreach_cb", "git_repository_set_head_detached", "git_revwalk_hide", "git_revwalk_hide_cb", "git_revwalk_next", "git_revwalk_push", + "git_stash_cb", + "git_stash_save", + "git_stash_save_with_opts", "git_tag_annotation_create", "git_tag_create", - "git_tag_create_frombuffer", + "git_tag_create_from_buffer", "git_tag_create_lightweight", + "git_tag_foreach_cb", "git_tag_lookup", "git_tag_lookup_prefix", + "git_transaction_set_target", "git_tree_create_updated", "git_tree_entry_byid", "git_tree_lookup", @@ -33490,170 +32559,65 @@ "decl": "git_oid_shorten", "type": "struct", "value": "git_oid_shorten", - "file": "oid.h", - "line": 215, - "lineto": 215, + "file": "git2/oid.h", + "line": 317, + "lineto": 317, "tdef": "typedef", "description": " OID Shortener object", "comments": "", "used": { - "returns": [ - "git_oid_shorten_new" - ], - "needs": [ - "git_oid_shorten_add", - "git_oid_shorten_free" - ] + "returns": ["git_oid_shorten_new"], + "needs": ["git_oid_shorten_add", "git_oid_shorten_free"] } } ], [ - "git_oidarray", + "git_oid_t", { - "decl": [ - "git_oid * ids", - "size_t count" - ], - "type": "struct", - "value": "git_oidarray", - "file": "oidarray.h", - "line": 16, - "lineto": 19, - "block": "git_oid * ids\nsize_t count", + "decl": ["GIT_OID_SHA1"], + "type": "enum", + "file": "git2/oid.h", + "line": 23, + "lineto": 32, + "block": "GIT_OID_SHA1", "tdef": "typedef", - "description": " Array of object ids ", + "description": " 
The type of object id. ", "comments": "", "fields": [ { - "type": "git_oid *", - "name": "ids", - "comments": "" - }, - { - "type": "size_t", - "name": "count", - "comments": "" + "type": "int", + "name": "GIT_OID_SHA1", + "comments": "

SHA1

\n", + "value": 1 } ], - "used": { - "returns": [], - "needs": [ - "git_merge_bases", - "git_merge_bases_many", - "git_oidarray_free" - ] - } + "used": { "returns": ["git_repository_oid_type"], "needs": [] } } ], [ - "git_otype", + "git_oidarray", { - "decl": [ - "GIT_OBJ_ANY", - "GIT_OBJ_BAD", - "GIT_OBJ__EXT1", - "GIT_OBJ_COMMIT", - "GIT_OBJ_TREE", - "GIT_OBJ_BLOB", - "GIT_OBJ_TAG", - "GIT_OBJ__EXT2", - "GIT_OBJ_OFS_DELTA", - "GIT_OBJ_REF_DELTA" - ], - "type": "enum", - "file": "types.h", - "line": 67, - "lineto": 78, - "block": "GIT_OBJ_ANY\nGIT_OBJ_BAD\nGIT_OBJ__EXT1\nGIT_OBJ_COMMIT\nGIT_OBJ_TREE\nGIT_OBJ_BLOB\nGIT_OBJ_TAG\nGIT_OBJ__EXT2\nGIT_OBJ_OFS_DELTA\nGIT_OBJ_REF_DELTA", + "decl": ["git_oid * ids", "size_t count"], + "type": "struct", + "value": "git_oidarray", + "file": "git2/oidarray.h", + "line": 23, + "lineto": 26, + "block": "git_oid * ids\nsize_t count", "tdef": "typedef", - "description": " Basic type (loose or packed) of any Git object. ", + "description": " Array of object ids ", "comments": "", "fields": [ - { - "type": "int", - "name": "GIT_OBJ_ANY", - "comments": "

Object can be any of the following

\n", - "value": -2 - }, - { - "type": "int", - "name": "GIT_OBJ_BAD", - "comments": "

Object is invalid.

\n", - "value": -1 - }, - { - "type": "int", - "name": "GIT_OBJ__EXT1", - "comments": "

Reserved for future use.

\n", - "value": 0 - }, - { - "type": "int", - "name": "GIT_OBJ_COMMIT", - "comments": "

A commit object.

\n", - "value": 1 - }, - { - "type": "int", - "name": "GIT_OBJ_TREE", - "comments": "

A tree (directory listing) object.

\n", - "value": 2 - }, - { - "type": "int", - "name": "GIT_OBJ_BLOB", - "comments": "

A file revision object.

\n", - "value": 3 - }, - { - "type": "int", - "name": "GIT_OBJ_TAG", - "comments": "

An annotated tag object.

\n", - "value": 4 - }, - { - "type": "int", - "name": "GIT_OBJ__EXT2", - "comments": "

Reserved for future use.

\n", - "value": 5 - }, - { - "type": "int", - "name": "GIT_OBJ_OFS_DELTA", - "comments": "

A delta, base is given by an offset.

\n", - "value": 6 - }, - { - "type": "int", - "name": "GIT_OBJ_REF_DELTA", - "comments": "

A delta, base is given by object id.

\n", - "value": 7 - } + { "type": "git_oid *", "name": "ids", "comments": "" }, + { "type": "size_t", "name": "count", "comments": "" } ], "used": { - "returns": [ - "git_object_string2type", - "git_object_type", - "git_odb_object_type", - "git_tag_target_type", - "git_tree_entry_type" - ], + "returns": [], "needs": [ - "git_object__size", - "git_object_lookup", - "git_object_lookup_bypath", - "git_object_lookup_prefix", - "git_object_peel", - "git_object_type2string", - "git_object_typeisloose", - "git_odb_hash", - "git_odb_hashfile", - "git_odb_open_rstream", - "git_odb_open_wstream", - "git_odb_read_header", - "git_odb_write", - "git_reference_peel", - "git_repository_hashfile" + "git_merge_bases", + "git_merge_bases_many", + "git_oidarray_dispose", + "git_oidarray_free" ] } } @@ -33664,9 +32628,9 @@ "decl": "git_packbuilder", "type": "struct", "value": "git_packbuilder", - "file": "types.h", - "line": 156, - "lineto": 156, + "file": "git2/types.h", + "line": 177, + "lineto": 177, "tdef": "typedef", "description": " Representation of a git packbuilder ", "comments": "", @@ -33681,11 +32645,13 @@ "git_packbuilder_insert_recur", "git_packbuilder_insert_tree", "git_packbuilder_insert_walk", + "git_packbuilder_name", "git_packbuilder_new", "git_packbuilder_object_count", "git_packbuilder_set_callbacks", "git_packbuilder_set_threads", "git_packbuilder_write", + "git_packbuilder_write_buf", "git_packbuilder_written" ] } @@ -33699,9 +32665,9 @@ "GIT_PACKBUILDER_DELTAFICATION" ], "type": "enum", - "file": "pack.h", - "line": 51, - "lineto": 54, + "file": "git2/pack.h", + "line": 52, + "lineto": 55, "block": "GIT_PACKBUILDER_ADDING_OBJECTS\nGIT_PACKBUILDER_DELTAFICATION", "tdef": "typedef", "description": " Stages that are reported by the packbuilder progress callback.", @@ -33720,10 +32686,7 @@ "value": 1 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -33732,7 +32695,7 @@ "decl": "git_patch", "type": "struct", 
"value": "git_patch", - "file": "patch.h", + "file": "git2/patch.h", "line": 29, "lineto": 29, "tdef": "typedef", @@ -33752,6 +32715,7 @@ "git_patch_line_stats", "git_patch_num_hunks", "git_patch_num_lines_in_hunk", + "git_patch_owner", "git_patch_print", "git_patch_size", "git_patch_to_buf" @@ -33759,15 +32723,50 @@ } } ], + [ + "git_path_fs", + { + "decl": ["GIT_PATH_FS_GENERIC", "GIT_PATH_FS_NTFS", "GIT_PATH_FS_HFS"], + "type": "enum", + "file": "git2/sys/path.h", + "line": 44, + "lineto": 51, + "block": "GIT_PATH_FS_GENERIC\nGIT_PATH_FS_NTFS\nGIT_PATH_FS_HFS", + "tdef": "typedef", + "description": " The kinds of checks to perform according to which filesystem we are trying to\n protect.", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_PATH_FS_GENERIC", + "comments": "

Do both NTFS- and HFS-specific checks

\n", + "value": 0 + }, + { + "type": "int", + "name": "GIT_PATH_FS_NTFS", + "comments": "

Do NTFS-specific checks only

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_PATH_FS_HFS", + "comments": "

Do HFS-specific checks only

\n", + "value": 2 + } + ], + "used": { "returns": [], "needs": [] } + } + ], [ "git_pathspec", { "decl": "git_pathspec", "type": "struct", "value": "git_pathspec", - "file": "pathspec.h", - "line": 20, - "lineto": 20, + "file": "git2/pathspec.h", + "line": 27, + "lineto": 27, "tdef": "typedef", "description": " Compiled pathspec", "comments": "", @@ -33804,9 +32803,9 @@ "GIT_PATHSPEC_FAILURES_ONLY" ], "type": "enum", - "file": "pathspec.h", - "line": 30, - "lineto": 73, + "file": "git2/pathspec.h", + "line": 37, + "lineto": 80, "block": "GIT_PATHSPEC_DEFAULT\nGIT_PATHSPEC_IGNORE_CASE\nGIT_PATHSPEC_USE_CASE\nGIT_PATHSPEC_NO_GLOB\nGIT_PATHSPEC_NO_MATCH_ERROR\nGIT_PATHSPEC_FIND_FAILURES\nGIT_PATHSPEC_FAILURES_ONLY", "tdef": "typedef", "description": " Options controlling how pathspec match should be executed", @@ -33855,10 +32854,7 @@ "value": 32 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -33867,9 +32863,9 @@ "decl": "git_pathspec_match_list", "type": "struct", "value": "git_pathspec_match_list", - "file": "pathspec.h", - "line": 25, - "lineto": 25, + "file": "git2/pathspec.h", + "line": 32, + "lineto": 32, "tdef": "typedef", "description": " List of filenames matching a pathspec", "comments": "", @@ -33897,25 +32893,21 @@ "unsigned int version", "git_proxy_t type", "const char * url", - "git_cred_acquire_cb credentials", + "git_credential_acquire_cb credentials", "git_transport_certificate_check_cb certificate_check", "void * payload" ], "type": "struct", "value": "git_proxy_options", - "file": "proxy.h", - "line": 42, - "lineto": 77, - "block": "unsigned int version\ngit_proxy_t type\nconst char * url\ngit_cred_acquire_cb credentials\ngit_transport_certificate_check_cb certificate_check\nvoid * payload", + "file": "git2/proxy.h", + "line": 50, + "lineto": 85, + "block": "unsigned int version\ngit_proxy_t type\nconst char * url\ngit_credential_acquire_cb credentials\ngit_transport_certificate_check_cb 
certificate_check\nvoid * payload", "tdef": "typedef", "description": " Options for connecting through a proxy", "comments": "

Note that not all types may be supported, depending on the platform and compilation options.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "git_proxy_t", "name": "type", @@ -33927,14 +32919,14 @@ "comments": " The URL of the proxy." }, { - "type": "git_cred_acquire_cb", + "type": "git_credential_acquire_cb", "name": "credentials", "comments": " This will be called if the remote host requires\n authentication in order to connect to it.\n\n Returning GIT_PASSTHROUGH will make libgit2 behave as\n though this field isn't set." }, { "type": "git_transport_certificate_check_cb", "name": "certificate_check", - "comments": " If cert verification fails, this will be called to let the\n user make the final decision of whether to allow the\n connection to proceed. Returns 1 to allow the connection, 0\n to disallow it or a negative value to indicate an error." + "comments": " If cert verification fails, this will be called to let the\n user make the final decision of whether to allow the\n connection to proceed. Returns 0 to allow the connection\n or a negative value to indicate an error." 
}, { "type": "void *", @@ -33944,26 +32936,18 @@ ], "used": { "returns": [], - "needs": [ - "git_proxy_init_options", - "git_remote_connect", - "git_transport_smart_proxy_options" - ] + "needs": ["git_proxy_options_init", "git_remote_connect"] } } ], [ "git_proxy_t", { - "decl": [ - "GIT_PROXY_NONE", - "GIT_PROXY_AUTO", - "GIT_PROXY_SPECIFIED" - ], + "decl": ["GIT_PROXY_NONE", "GIT_PROXY_AUTO", "GIT_PROXY_SPECIFIED"], "type": "enum", - "file": "proxy.h", - "line": 18, - "lineto": 34, + "file": "git2/proxy.h", + "line": 26, + "lineto": 42, "block": "GIT_PROXY_NONE\nGIT_PROXY_AUTO\nGIT_PROXY_SPECIFIED", "tdef": "typedef", "description": " The type of proxy to use.", @@ -33988,10 +32972,7 @@ "value": 2 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -34000,17 +32981,17 @@ "decl": "git_push", "type": "struct", "value": "git_push", - "file": "types.h", - "line": 240, - "lineto": 240, + "file": "git2/types.h", + "line": 269, + "lineto": 269, "tdef": "typedef", "description": " Preparation for a push operation. 
Can be used to configure what to\n push and the level of parallelism of the packfile builder.", "comments": "", "used": { "returns": [], "needs": [ - "git_push_init_options", "git_push_negotiation", + "git_push_options_init", "git_remote_push", "git_remote_upload" ] @@ -34025,23 +33006,21 @@ "unsigned int pb_parallelism", "git_remote_callbacks callbacks", "git_proxy_options proxy_opts", - "git_strarray custom_headers" + "git_remote_redirect_t follow_redirects", + "git_strarray custom_headers", + "git_strarray remote_push_options" ], "type": "struct", "value": "git_push_options", - "file": "remote.h", - "line": 615, - "lineto": 642, - "block": "unsigned int version\nunsigned int pb_parallelism\ngit_remote_callbacks callbacks\ngit_proxy_options proxy_opts\ngit_strarray custom_headers", + "file": "git2/remote.h", + "line": 860, + "lineto": 899, + "block": "unsigned int version\nunsigned int pb_parallelism\ngit_remote_callbacks callbacks\ngit_proxy_options proxy_opts\ngit_remote_redirect_t follow_redirects\ngit_strarray custom_headers\ngit_strarray remote_push_options", "tdef": "typedef", "description": " Controls the behavior of a git_push object.", "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", "name": "pb_parallelism", @@ -34057,16 +33036,26 @@ "name": "proxy_opts", "comments": " Proxy options to use, by default no proxy is used." }, + { + "type": "git_remote_redirect_t", + "name": "follow_redirects", + "comments": " Whether to allow off-site redirects. If this is not\n specified, the `http.followRedirects` configuration setting\n will be consulted." + }, { "type": "git_strarray", "name": "custom_headers", "comments": " Extra headers for this push operation" + }, + { + "type": "git_strarray", + "name": "remote_push_options", + "comments": " \"Push options\" to deliver to the remote." 
} ], "used": { "returns": [], "needs": [ - "git_push_init_options", + "git_push_options_init", "git_remote_push", "git_remote_upload" ] @@ -34084,9 +33073,9 @@ ], "type": "struct", "value": "git_push_update", - "file": "remote.h", - "line": 359, - "lineto": 376, + "file": "git2/remote.h", + "line": 490, + "lineto": 507, "block": "char * src_refname\nchar * dst_refname\ngit_oid src\ngit_oid dst", "tdef": "typedef", "description": " Represents an update which will be performed on the remote during push", @@ -34113,12 +33102,7 @@ "comments": " The new target for the reference" } ], - "used": { - "returns": [], - "needs": [ - "git_push_negotiation" - ] - } + "used": { "returns": [], "needs": ["git_push_negotiation"] } } ], [ @@ -34127,29 +33111,31 @@ "decl": "git_rebase", "type": "struct", "value": "git_rebase", - "file": "types.h", - "line": 191, - "lineto": 191, + "file": "git2/types.h", + "line": 220, + "lineto": 220, "tdef": "typedef", "description": " Representation of a rebase ", "comments": "", "used": { - "returns": [ - "git_rebase_operation_byindex" - ], + "returns": ["git_rebase_operation_byindex"], "needs": [ "git_rebase_abort", "git_rebase_commit", "git_rebase_finish", "git_rebase_free", "git_rebase_init", - "git_rebase_init_options", "git_rebase_inmemory_index", "git_rebase_next", + "git_rebase_onto_id", + "git_rebase_onto_name", "git_rebase_open", "git_rebase_operation_byindex", "git_rebase_operation_current", - "git_rebase_operation_entrycount" + "git_rebase_operation_entrycount", + "git_rebase_options_init", + "git_rebase_orig_head_id", + "git_rebase_orig_head_name" ] } } @@ -34164,9 +33150,9 @@ ], "type": "struct", "value": "git_rebase_operation", - "file": "rebase.h", - "line": 130, - "lineto": 145, + "file": "git2/rebase.h", + "line": 174, + "lineto": 189, "block": "git_rebase_operation_t type\nconst git_oid id\nconst char * exec", "tdef": "typedef", "description": " A rebase operation", @@ -34189,12 +33175,8 @@ } ], "used": { - "returns": [ - 
"git_rebase_operation_byindex" - ], - "needs": [ - "git_rebase_next" - ] + "returns": ["git_rebase_operation_byindex"], + "needs": ["git_rebase_next"] } } ], @@ -34210,9 +33192,9 @@ "GIT_REBASE_OPERATION_EXEC" ], "type": "enum", - "file": "rebase.h", - "line": 78, - "lineto": 114, + "file": "git2/rebase.h", + "line": 119, + "lineto": 155, "block": "GIT_REBASE_OPERATION_PICK\nGIT_REBASE_OPERATION_REWORD\nGIT_REBASE_OPERATION_EDIT\nGIT_REBASE_OPERATION_SQUASH\nGIT_REBASE_OPERATION_FIXUP\nGIT_REBASE_OPERATION_EXEC", "tdef": "typedef", "description": " Type of rebase operation in-progress after calling `git_rebase_next`.", @@ -34255,60 +33237,82 @@ "value": 5 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ - "git_ref_t", + "git_rebase_options", { "decl": [ - "GIT_REF_INVALID", - "GIT_REF_OID", - "GIT_REF_SYMBOLIC", - "GIT_REF_LISTALL" + "unsigned int version", + "int quiet", + "int inmemory", + "const char * rewrite_notes_ref", + "git_merge_options merge_options", + "git_checkout_options checkout_options", + "git_commit_create_cb commit_create_cb", + "int (*)(git_buf *, git_buf *, const char *, void *) signing_cb", + "void * payload" ], - "type": "enum", - "file": "types.h", - "line": 194, - "lineto": 199, - "block": "GIT_REF_INVALID\nGIT_REF_OID\nGIT_REF_SYMBOLIC\nGIT_REF_LISTALL", + "type": "struct", + "value": "git_rebase_options", + "file": "git2/rebase.h", + "line": 32, + "lineto": 114, + "block": "unsigned int version\nint quiet\nint inmemory\nconst char * rewrite_notes_ref\ngit_merge_options merge_options\ngit_checkout_options checkout_options\ngit_commit_create_cb commit_create_cb\nint (*)(git_buf *, git_buf *, const char *, void *) signing_cb\nvoid * payload", "tdef": "typedef", - "description": " Basic type of any Git reference. ", - "comments": "", + "description": " Rebase options", + "comments": "

Use to tell the rebase machinery how to operate.

\n", "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "int", - "name": "GIT_REF_INVALID", - "comments": "

Invalid reference

\n", - "value": 0 + "name": "quiet", + "comments": " Used by `git_rebase_init`, this will instruct other clients working\n on this rebase that you want a quiet rebase experience, which they\n may choose to provide in an application-specific manner. This has no\n effect upon libgit2 directly, but is provided for interoperability\n between Git tools." }, { "type": "int", - "name": "GIT_REF_OID", - "comments": "

A reference which points at an object id

\n", - "value": 1 + "name": "inmemory", + "comments": " Used by `git_rebase_init`, this will begin an in-memory rebase,\n which will allow callers to step through the rebase operations and\n commit the rebased changes, but will not rewind HEAD or update the\n repository to be in a rebasing state. This will not interfere with\n the working directory (if there is one)." }, { - "type": "int", - "name": "GIT_REF_SYMBOLIC", - "comments": "

A reference which points at another reference

\n", - "value": 2 + "type": "const char *", + "name": "rewrite_notes_ref", + "comments": " Used by `git_rebase_finish`, this is the name of the notes reference\n used to rewrite notes for rebased commits when finishing the rebase;\n if NULL, the contents of the configuration option `notes.rewriteRef`\n is examined, unless the configuration option `notes.rewrite.rebase`\n is set to false. If `notes.rewriteRef` is also NULL, notes will\n not be rewritten." }, { - "type": "int", - "name": "GIT_REF_LISTALL", - "comments": "", - "value": 3 + "type": "git_merge_options", + "name": "merge_options", + "comments": " Options to control how trees are merged during `git_rebase_next`." + }, + { + "type": "git_checkout_options", + "name": "checkout_options", + "comments": " Options to control how files are written during `git_rebase_init`,\n `git_rebase_next` and `git_rebase_abort`. Note that during\n `abort`, these options will add an implied `GIT_CHECKOUT_FORCE`\n to match git semantics." + }, + { + "type": "git_commit_create_cb", + "name": "commit_create_cb", + "comments": " Optional callback that allows users to override commit\n creation in `git_rebase_commit`. If specified, users can\n create their own commit and provide the commit ID, which\n may be useful for signing commits or otherwise customizing\n the commit creation.\n\n If this callback returns `GIT_PASSTHROUGH`, then\n `git_rebase_commit` will continue to create the commit." + }, + { + "type": "int (*)(git_buf *, git_buf *, const char *, void *)", + "name": "signing_cb", + "comments": " If provided, this will be called with the commit content, allowing\n a signature to be added to the rebase commit. Can be skipped with\n GIT_PASSTHROUGH. 
If GIT_PASSTHROUGH is returned, a commit will be made\n without a signature.\n\n This field is only used when performing git_rebase_commit.\n\n This callback is not invoked if a `git_commit_create_cb` is\n specified.\n\n This callback is deprecated; users should provide a\n creation callback as `commit_create_cb` that produces a\n commit buffer, signs it, and commits it." + }, + { + "type": "void *", + "name": "payload", + "comments": " This will be passed to each of the callbacks in this struct\n as the last parameter." } ], "used": { - "returns": [ - "git_reference_type" - ], - "needs": [] + "returns": [], + "needs": [ + "git_rebase_init", + "git_rebase_open", + "git_rebase_options_init" + ] } } ], @@ -34318,24 +33322,20 @@ "decl": "git_refdb", "type": "struct", "value": "git_refdb", - "file": "types.h", - "line": 96, - "lineto": 96, + "file": "git2/types.h", + "line": 108, + "lineto": 108, "tdef": "typedef", "description": " An open refs database handle. ", "comments": "", "used": { "returns": [], "needs": [ - "git_refdb_backend_fs", "git_refdb_compress", "git_refdb_free", - "git_refdb_init_backend", "git_refdb_new", "git_refdb_open", - "git_refdb_set_backend", - "git_repository_refdb", - "git_repository_set_refdb" + "git_repository_refdb" ] } } @@ -34346,108 +33346,13 @@ "decl": "git_refdb_backend", "type": "struct", "value": "git_refdb_backend", - "file": "types.h", - "line": 99, - "lineto": 99, - "block": "unsigned int version\nint (*)(int *, git_refdb_backend *, const char *) exists\nint (*)(git_reference **, git_refdb_backend *, const char *) lookup\nint (*)(git_reference_iterator **, struct git_refdb_backend *, const char *) iterator\nint (*)(git_refdb_backend *, const git_reference *, int, const git_signature *, const char *, const git_oid *, const char *) write\nint (*)(git_reference **, git_refdb_backend *, const char *, const char *, int, const git_signature *, const char *) rename\nint (*)(git_refdb_backend *, const char *, const git_oid *, const char 
*) del\nint (*)(git_refdb_backend *) compress\nint (*)(git_refdb_backend *, const char *) has_log\nint (*)(git_refdb_backend *, const char *) ensure_log\nvoid (*)(git_refdb_backend *) free\nint (*)(git_reflog **, git_refdb_backend *, const char *) reflog_read\nint (*)(git_refdb_backend *, git_reflog *) reflog_write\nint (*)(git_refdb_backend *, const char *, const char *) reflog_rename\nint (*)(git_refdb_backend *, const char *) reflog_delete\nint (*)(void **, git_refdb_backend *, const char *) lock\nint (*)(git_refdb_backend *, void *, int, int, const git_reference *, const git_signature *, const char *) unlock", + "file": "git2/types.h", + "line": 111, + "lineto": 111, "tdef": "typedef", "description": " A custom backend for refs ", "comments": "", - "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, - { - "type": "int (*)(int *, git_refdb_backend *, const char *)", - "name": "exists", - "comments": " Queries the refdb backend to determine if the given ref_name\n exists. A refdb implementation must provide this function." - }, - { - "type": "int (*)(git_reference **, git_refdb_backend *, const char *)", - "name": "lookup", - "comments": " Queries the refdb backend for a given reference. A refdb\n implementation must provide this function." - }, - { - "type": "int (*)(git_reference_iterator **, struct git_refdb_backend *, const char *)", - "name": "iterator", - "comments": " Allocate an iterator object for the backend.\n\n A refdb implementation must provide this function." 
- }, - { - "type": "int (*)(git_refdb_backend *, const git_reference *, int, const git_signature *, const char *, const git_oid *, const char *)", - "name": "write", - "comments": "" - }, - { - "type": "int (*)(git_reference **, git_refdb_backend *, const char *, const char *, int, const git_signature *, const char *)", - "name": "rename", - "comments": "" - }, - { - "type": "int (*)(git_refdb_backend *, const char *, const git_oid *, const char *)", - "name": "del", - "comments": " Deletes the given reference (and if necessary its reflog)\n from the refdb. A refdb implementation must provide this\n function." - }, - { - "type": "int (*)(git_refdb_backend *)", - "name": "compress", - "comments": " Suggests that the given refdb compress or optimize its references.\n This mechanism is implementation specific. (For on-disk reference\n databases, this may pack all loose references.) A refdb\n implementation may provide this function; if it is not provided,\n nothing will be done." - }, - { - "type": "int (*)(git_refdb_backend *, const char *)", - "name": "has_log", - "comments": " Query whether a particular reference has a log (may be empty)" - }, - { - "type": "int (*)(git_refdb_backend *, const char *)", - "name": "ensure_log", - "comments": " Make sure a particular reference will have a reflog which\n will be appended to on writes." - }, - { - "type": "void (*)(git_refdb_backend *)", - "name": "free", - "comments": " Frees any resources held by the refdb (including the `git_refdb_backend`\n itself). A refdb backend implementation must provide this function." - }, - { - "type": "int (*)(git_reflog **, git_refdb_backend *, const char *)", - "name": "reflog_read", - "comments": " Read the reflog for the given reference name." - }, - { - "type": "int (*)(git_refdb_backend *, git_reflog *)", - "name": "reflog_write", - "comments": " Write a reflog to disk." 
- }, - { - "type": "int (*)(git_refdb_backend *, const char *, const char *)", - "name": "reflog_rename", - "comments": " Rename a reflog" - }, - { - "type": "int (*)(git_refdb_backend *, const char *)", - "name": "reflog_delete", - "comments": " Remove a reflog." - }, - { - "type": "int (*)(void **, git_refdb_backend *, const char *)", - "name": "lock", - "comments": " Lock a reference. The opaque parameter will be passed to the unlock function" - }, - { - "type": "int (*)(git_refdb_backend *, void *, int, int, const git_reference *, const git_signature *, const char *)", - "name": "unlock", - "comments": " Unlock a reference. Only one of target or symbolic_target\n will be set. success indicates whether to update the\n reference or discard the lock (if it's false)" - } - ], - "used": { - "returns": [], - "needs": [ - "git_refdb_backend_fs", - "git_refdb_init_backend", - "git_refdb_set_backend" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -34456,17 +33361,14 @@ "decl": "git_reference", "type": "struct", "value": "git_reference", - "file": "types.h", - "line": 173, - "lineto": 173, + "file": "git2/types.h", + "line": 194, + "lineto": 194, "tdef": "typedef", "description": " In-memory representation of a reference. 
", "comments": "", "used": { - "returns": [ - "git_reference__alloc", - "git_reference__alloc_symbolic" - ], + "returns": ["git_reference_type"], "needs": [ "git_annotated_commit_from_ref", "git_branch_create", @@ -34480,6 +33382,7 @@ "git_branch_next", "git_branch_set_upstream", "git_branch_upstream", + "git_merge_analysis_for_ref", "git_reference_cmp", "git_reference_create", "git_reference_create_matching", @@ -34487,6 +33390,7 @@ "git_reference_dup", "git_reference_dwim", "git_reference_foreach", + "git_reference_foreach_cb", "git_reference_foreach_glob", "git_reference_foreach_name", "git_reference_free", @@ -34522,40 +33426,63 @@ } ], [ - "git_reference_iterator", + "git_reference_format_t", { - "decl": "git_reference_iterator", - "type": "struct", - "value": "git_reference_iterator", - "file": "types.h", - "line": 176, - "lineto": 176, - "block": "git_refdb * db\nint (*)(git_reference **, git_reference_iterator *) next\nint (*)(const char **, git_reference_iterator *) next_name\nvoid (*)(git_reference_iterator *) free", + "decl": [ + "GIT_REFERENCE_FORMAT_NORMAL", + "GIT_REFERENCE_FORMAT_ALLOW_ONELEVEL", + "GIT_REFERENCE_FORMAT_REFSPEC_PATTERN", + "GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND" + ], + "type": "enum", + "file": "git2/refs.h", + "line": 663, + "lineto": 692, + "block": "GIT_REFERENCE_FORMAT_NORMAL\nGIT_REFERENCE_FORMAT_ALLOW_ONELEVEL\nGIT_REFERENCE_FORMAT_REFSPEC_PATTERN\nGIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND", "tdef": "typedef", - "description": " Iterator for references ", + "description": " Normalization options for reference lookup", "comments": "", "fields": [ { - "type": "git_refdb *", - "name": "db", - "comments": "" + "type": "int", + "name": "GIT_REFERENCE_FORMAT_NORMAL", + "comments": "

No particular normalization.

\n", + "value": 0 }, { - "type": "int (*)(git_reference **, git_reference_iterator *)", - "name": "next", - "comments": " Return the current reference and advance the iterator." + "type": "int", + "name": "GIT_REFERENCE_FORMAT_ALLOW_ONELEVEL", + "comments": "

Control whether one-level refnames are accepted\n (i.e., refnames that do not contain multiple /-separated\n components). Those are expected to be written only using\n uppercase letters and underscore (FETCH_HEAD, ...)

\n", + "value": 1 }, { - "type": "int (*)(const char **, git_reference_iterator *)", - "name": "next_name", - "comments": " Return the name of the current reference and advance the iterator" + "type": "int", + "name": "GIT_REFERENCE_FORMAT_REFSPEC_PATTERN", + "comments": "

Interpret the provided name as a reference pattern for a\n refspec (as used with remote repositories). If this option\n is enabled, the name is allowed to contain a single * (\n<star

\n\n
\n

)\n in place of a one full pathname component\n (e.g., foo/\n<star\n/bar but not foo/bar\n<star\n).

\n
\n", + "value": 2 }, { - "type": "void (*)(git_reference_iterator *)", - "name": "free", - "comments": " Free the iterator" + "type": "int", + "name": "GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND", + "comments": "

Interpret the name as part of a refspec in shorthand form\n so the ONELEVEL naming rules aren't enforced and 'master'\n becomes a valid name.

\n", + "value": 4 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_reference_iterator", + { + "decl": "git_reference_iterator", + "type": "struct", + "value": "git_reference_iterator", + "file": "git2/types.h", + "line": 197, + "lineto": 197, + "tdef": "typedef", + "description": " Iterator for references ", + "comments": "", "used": { "returns": [], "needs": [ @@ -34569,52 +33496,49 @@ } ], [ - "git_reference_normalize_t", + "git_reference_t", { "decl": [ - "GIT_REF_FORMAT_NORMAL", - "GIT_REF_FORMAT_ALLOW_ONELEVEL", - "GIT_REF_FORMAT_REFSPEC_PATTERN", - "GIT_REF_FORMAT_REFSPEC_SHORTHAND" + "GIT_REFERENCE_INVALID", + "GIT_REFERENCE_DIRECT", + "GIT_REFERENCE_SYMBOLIC", + "GIT_REFERENCE_ALL" ], "type": "enum", - "file": "refs.h", - "line": 639, - "lineto": 668, - "block": "GIT_REF_FORMAT_NORMAL\nGIT_REF_FORMAT_ALLOW_ONELEVEL\nGIT_REF_FORMAT_REFSPEC_PATTERN\nGIT_REF_FORMAT_REFSPEC_SHORTHAND", + "file": "git2/types.h", + "line": 223, + "lineto": 228, + "block": "GIT_REFERENCE_INVALID\nGIT_REFERENCE_DIRECT\nGIT_REFERENCE_SYMBOLIC\nGIT_REFERENCE_ALL", "tdef": "typedef", - "description": " Normalization options for reference lookup", + "description": " Basic type of any Git reference. ", "comments": "", "fields": [ { "type": "int", - "name": "GIT_REF_FORMAT_NORMAL", - "comments": "

No particular normalization.

\n", + "name": "GIT_REFERENCE_INVALID", + "comments": "

Invalid reference

\n", "value": 0 }, { "type": "int", - "name": "GIT_REF_FORMAT_ALLOW_ONELEVEL", - "comments": "

Control whether one-level refnames are accepted\n (i.e., refnames that do not contain multiple /-separated\n components). Those are expected to be written only using\n uppercase letters and underscore (FETCH_HEAD, ...)

\n", + "name": "GIT_REFERENCE_DIRECT", + "comments": "

A reference that points at an object id

\n", "value": 1 }, { "type": "int", - "name": "GIT_REF_FORMAT_REFSPEC_PATTERN", - "comments": "

Interpret the provided name as a reference pattern for a\n refspec (as used with remote repositories). If this option\n is enabled, the name is allowed to contain a single * (\n<star

\n\n
\n

)\n in place of a one full pathname component\n (e.g., foo/\n<star\n/bar but not foo/bar\n<star\n).

\n
\n", + "name": "GIT_REFERENCE_SYMBOLIC", + "comments": "

A reference that points at another reference

\n", "value": 2 }, { "type": "int", - "name": "GIT_REF_FORMAT_REFSPEC_SHORTHAND", - "comments": "

Interpret the name as part of a refspec in shorthand form\n so the ONELEVEL naming rules aren't enforced and 'master'\n becomes a valid name.

\n", - "value": 4 + "name": "GIT_REFERENCE_ALL", + "comments": "", + "value": 3 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": ["git_reference_type"], "needs": [] } } ], [ @@ -34623,16 +33547,14 @@ "decl": "git_reflog", "type": "struct", "value": "git_reflog", - "file": "types.h", - "line": 150, - "lineto": 150, + "file": "git2/types.h", + "line": 171, + "lineto": 171, "tdef": "typedef", "description": " Representation of a reference log ", "comments": "", "used": { - "returns": [ - "git_reflog_entry_byindex" - ], + "returns": ["git_reflog_entry_byindex"], "needs": [ "git_reflog_append", "git_reflog_drop", @@ -34644,7 +33566,8 @@ "git_reflog_entrycount", "git_reflog_free", "git_reflog_read", - "git_reflog_write" + "git_reflog_write", + "git_transaction_set_reflog" ] } } @@ -34655,16 +33578,14 @@ "decl": "git_reflog_entry", "type": "struct", "value": "git_reflog_entry", - "file": "types.h", - "line": 147, - "lineto": 147, + "file": "git2/types.h", + "line": 168, + "lineto": 168, "tdef": "typedef", "description": " Representation of a reference log entry ", "comments": "", "used": { - "returns": [ - "git_reflog_entry_byindex" - ], + "returns": ["git_reflog_entry_byindex"], "needs": [ "git_reflog_entry_committer", "git_reflog_entry_id_new", @@ -34674,32 +33595,65 @@ } } ], + [ + "git_refspec", + { + "decl": "git_refspec", + "type": "struct", + "value": "git_refspec", + "file": "git2/types.h", + "line": 251, + "lineto": 251, + "tdef": "typedef", + "description": " A refspec specifies the mapping between remote and local reference\n names when fetch or pushing.", + "comments": "", + "used": { + "returns": ["git_remote_get_refspec"], + "needs": [ + "git_refspec_direction", + "git_refspec_dst", + "git_refspec_dst_matches", + "git_refspec_force", + "git_refspec_free", + "git_refspec_parse", + "git_refspec_rtransform", + "git_refspec_src", + "git_refspec_src_matches", + "git_refspec_src_matches_negative", + "git_refspec_string", + 
"git_refspec_transform" + ] + } + } + ], [ "git_remote", { "decl": "git_remote", "type": "struct", "value": "git_remote", - "file": "types.h", - "line": 228, - "lineto": 228, + "file": "git2/types.h", + "line": 257, + "lineto": 257, "tdef": "typedef", - "description": " Git's idea of a remote repository. A remote can be anonymous (in\n which case it does not have backing configuration entires).", + "description": " Git's idea of a remote repository. A remote can be anonymous (in\n which case it does not have backing configuration entries).", "comments": "", "used": { - "returns": [ - "git_remote_autotag" - ], + "returns": ["git_remote_autotag"], "needs": [ "git_headlist_cb", "git_remote_autotag", "git_remote_connect", + "git_remote_connect_ext", + "git_remote_connect_options_init", "git_remote_connected", "git_remote_create", "git_remote_create_anonymous", "git_remote_create_cb", "git_remote_create_detached", + "git_remote_create_options_init", "git_remote_create_with_fetchspec", + "git_remote_create_with_opts", "git_remote_default_branch", "git_remote_disconnect", "git_remote_download", @@ -34718,19 +33672,17 @@ "git_remote_prune_refs", "git_remote_push", "git_remote_pushurl", + "git_remote_ready_cb", "git_remote_refspec_count", "git_remote_set_autotag", + "git_remote_set_instance_pushurl", + "git_remote_set_instance_url", "git_remote_stats", "git_remote_stop", "git_remote_update_tips", "git_remote_upload", "git_remote_url", - "git_transport_cb", - "git_transport_dummy", - "git_transport_local", - "git_transport_new", - "git_transport_smart", - "git_transport_ssh_with_paths" + "git_transport_cb" ] } } @@ -34745,9 +33697,9 @@ "GIT_REMOTE_DOWNLOAD_TAGS_ALL" ], "type": "enum", - "file": "remote.h", - "line": 527, - "lineto": 545, + "file": "git2/remote.h", + "line": 739, + "lineto": 757, "block": "GIT_REMOTE_DOWNLOAD_TAGS_UNSPECIFIED\nGIT_REMOTE_DOWNLOAD_TAGS_AUTO\nGIT_REMOTE_DOWNLOAD_TAGS_NONE\nGIT_REMOTE_DOWNLOAD_TAGS_ALL", "tdef": "typedef", "description": " 
Automatic tag following option", @@ -34779,13 +33731,8 @@ } ], "used": { - "returns": [ - "git_remote_autotag" - ], - "needs": [ - "git_remote_set_autotag", - "git_remote_update_tips" - ] + "returns": ["git_remote_autotag"], + "needs": ["git_remote_set_autotag", "git_remote_update_tips"] } } ], @@ -34795,24 +33742,27 @@ "decl": [ "unsigned int version", "git_transport_message_cb sideband_progress", - "int (*)(git_remote_completion_type, void *) completion", - "git_cred_acquire_cb credentials", + "int (*)(git_remote_completion_t, void *) completion", + "git_credential_acquire_cb credentials", "git_transport_certificate_check_cb certificate_check", - "git_transfer_progress_cb transfer_progress", + "git_indexer_progress_cb transfer_progress", "int (*)(const char *, const git_oid *, const git_oid *, void *) update_tips", "git_packbuilder_progress pack_progress", - "git_push_transfer_progress push_transfer_progress", + "git_push_transfer_progress_cb push_transfer_progress", "git_push_update_reference_cb push_update_reference", "git_push_negotiation push_negotiation", "git_transport_cb transport", - "void * payload" + "git_remote_ready_cb remote_ready", + "void * payload", + "git_url_resolve_cb resolve_url", + "int (*)(const char *, const git_oid *, const git_oid *, git_refspec *, void *) update_refs" ], "type": "struct", "value": "git_remote_callbacks", - "file": "remote.h", - "line": 408, - "lineto": 490, - "block": "unsigned int version\ngit_transport_message_cb sideband_progress\nint (*)(git_remote_completion_type, void *) completion\ngit_cred_acquire_cb credentials\ngit_transport_certificate_check_cb certificate_check\ngit_transfer_progress_cb transfer_progress\nint (*)(const char *, const git_oid *, const git_oid *, void *) update_tips\ngit_packbuilder_progress pack_progress\ngit_push_transfer_progress push_transfer_progress\ngit_push_update_reference_cb push_update_reference\ngit_push_negotiation push_negotiation\ngit_transport_cb transport\nvoid * payload", + 
"file": "git2/remote.h", + "line": 572, + "lineto": 698, + "block": "unsigned int version\ngit_transport_message_cb sideband_progress\nint (*)(git_remote_completion_t, void *) completion\ngit_credential_acquire_cb credentials\ngit_transport_certificate_check_cb certificate_check\ngit_indexer_progress_cb transfer_progress\nint (*)(const char *, const git_oid *, const git_oid *, void *) update_tips\ngit_packbuilder_progress pack_progress\ngit_push_transfer_progress_cb push_transfer_progress\ngit_push_update_reference_cb push_update_reference\ngit_push_negotiation push_negotiation\ngit_transport_cb transport\ngit_remote_ready_cb remote_ready\nvoid * payload\ngit_url_resolve_cb resolve_url\nint (*)(const char *, const git_oid *, const git_oid *, git_refspec *, void *) update_refs", "tdef": null, "description": " The callback settings structure", "comments": "

Set the callbacks to be called by the remote when informing the user about the progress of the network operations.

\n", @@ -34820,7 +33770,7 @@ { "type": "unsigned int", "name": "version", - "comments": "" + "comments": " The version " }, { "type": "git_transport_message_cb", @@ -34828,29 +33778,29 @@ "comments": " Textual progress from the remote. Text send over the\n progress side-band will be passed to this function (this is\n the 'counting objects' output)." }, { - "type": "int (*)(git_remote_completion_type, void *)", + "type": "int (*)(git_remote_completion_t, void *)", "name": "completion", - "comments": " Completion is called when different parts of the download\n process are done (currently unused)." + "comments": "" }, { - "type": "git_cred_acquire_cb", + "type": "git_credential_acquire_cb", "name": "credentials", "comments": " This will be called if the remote host requires\n authentication in order to connect to it.\n\n Returning GIT_PASSTHROUGH will make libgit2 behave as\n though this field isn't set." }, { "type": "git_transport_certificate_check_cb", "name": "certificate_check", - "comments": " If cert verification fails, this will be called to let the\n user make the final decision of whether to allow the\n connection to proceed. Returns 1 to allow the connection, 0\n to disallow it or a negative value to indicate an error." + "comments": " If cert verification fails, this will be called to let the\n user make the final decision of whether to allow the\n connection to proceed. Returns 0 to allow the connection\n or a negative value to indicate an error." }, { - "type": "git_transfer_progress_cb", + "type": "git_indexer_progress_cb", "name": "transfer_progress", "comments": " During the download of new data, this will be regularly\n called with the current count of progress done by the\n indexer." }, { "type": "int (*)(const char *, const git_oid *, const git_oid *, void *)", "name": "update_tips", - "comments": " Each time a reference is updated locally, this function\n will be called with information about it." 
+ "comments": "" }, { "type": "git_packbuilder_progress", @@ -34858,7 +33808,7 @@ "comments": " Function to call with progress information during pack\n building. Be aware that this is called inline with pack\n building operations, so performance may be affected." }, { - "type": "git_push_transfer_progress", + "type": "git_push_transfer_progress_cb", "name": "push_transfer_progress", "comments": " Function to call with progress information during the\n upload portion of a push. Be aware that this is called\n inline with pack building operations, so performance may be\n affected." }, @@ -34877,10 +33827,25 @@ "name": "transport", "comments": " Create the transport to use for this operation. Leave NULL\n to auto-detect." }, + { + "type": "git_remote_ready_cb", + "name": "remote_ready", + "comments": " Callback when the remote is ready to connect." + }, { "type": "void *", "name": "payload", "comments": " This will be passed to each of the callbacks in this struct\n as the last parameter." + }, + { + "type": "git_url_resolve_cb", + "name": "resolve_url", + "comments": " Resolve URL before connecting to remote.\n The returned URL will be used to connect to the remote instead.\n\n This callback is deprecated; users should use\n git_remote_ready_cb and configure the instance URL instead." 
+ }, + { + "type": "int (*)(const char *, const git_oid *, const git_oid *, git_refspec *, void *)", + "name": "update_refs", + "comments": "" } ], "used": { @@ -34895,7 +33860,7 @@ } ], [ - "git_remote_completion_type", + "git_remote_completion_t", { "decl": [ "GIT_REMOTE_COMPLETION_DOWNLOAD", @@ -34903,9 +33868,9 @@ "GIT_REMOTE_COMPLETION_ERROR" ], "type": "enum", - "file": "remote.h", - "line": 344, - "lineto": 348, + "file": "git2/remote.h", + "line": 466, + "lineto": 470, "block": "GIT_REMOTE_COMPLETION_DOWNLOAD\nGIT_REMOTE_COMPLETION_INDEXING\nGIT_REMOTE_COMPLETION_ERROR\nGIT_REMOTE_COMPLETION_DOWNLOAD\nGIT_REMOTE_COMPLETION_INDEXING\nGIT_REMOTE_COMPLETION_ERROR", "tdef": "typedef", "description": " Argument to the completion callback which tells it which operation\n finished.", @@ -34930,9 +33895,137 @@ "value": 2 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_remote_connect_options", + { + "decl": [ + "unsigned int version", + "git_remote_callbacks callbacks", + "git_proxy_options proxy_opts", + "git_remote_redirect_t follow_redirects", + "git_strarray custom_headers" + ], + "type": "struct", + "value": "git_remote_connect_options", + "file": "git2/remote.h", + "line": 928, + "lineto": 946, + "block": "unsigned int version\ngit_remote_callbacks callbacks\ngit_proxy_options proxy_opts\ngit_remote_redirect_t follow_redirects\ngit_strarray custom_headers", + "tdef": "typedef", + "description": " Remote creation options structure", + "comments": "

Initialize with GIT_REMOTE_CREATE_OPTIONS_INIT. Alternatively, you can use git_remote_create_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "git_remote_callbacks", + "name": "callbacks", + "comments": " Callbacks to use for this connection " + }, + { + "type": "git_proxy_options", + "name": "proxy_opts", + "comments": " HTTP Proxy settings " + }, + { + "type": "git_remote_redirect_t", + "name": "follow_redirects", + "comments": " Whether to allow off-site redirects. If this is not\n specified, the `http.followRedirects` configuration setting\n will be consulted." + }, + { + "type": "git_strarray", + "name": "custom_headers", + "comments": " Extra HTTP headers to use in this connection " + } + ], + "used": { + "returns": [], + "needs": ["git_remote_connect_ext", "git_remote_connect_options_init"] + } + } + ], + [ + "git_remote_create_flags", + { + "decl": [ + "GIT_REMOTE_CREATE_SKIP_INSTEADOF", + "GIT_REMOTE_CREATE_SKIP_DEFAULT_FETCHSPEC" + ], + "type": "enum", + "file": "git2/remote.h", + "line": 71, + "lineto": 77, + "block": "GIT_REMOTE_CREATE_SKIP_INSTEADOF\nGIT_REMOTE_CREATE_SKIP_DEFAULT_FETCHSPEC", + "tdef": "typedef", + "description": " Remote creation options flags", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_REMOTE_CREATE_SKIP_INSTEADOF", + "comments": "

Ignore the repository apply.insteadOf configuration

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_REMOTE_CREATE_SKIP_DEFAULT_FETCHSPEC", + "comments": "

Don't build a fetchspec from the name if none is set

\n", + "value": 2 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_remote_create_options", + { + "decl": [ + "unsigned int version", + "git_repository * repository", + "const char * name", + "const char * fetchspec", + "unsigned int flags" + ], + "type": "struct", + "value": "git_remote_create_options", + "file": "git2/remote.h", + "line": 97, + "lineto": 117, + "block": "unsigned int version\ngit_repository * repository\nconst char * name\nconst char * fetchspec\nunsigned int flags", + "tdef": "typedef", + "description": " Remote creation options structure", + "comments": "

Initialize with GIT_REMOTE_CREATE_OPTIONS_INIT. Alternatively, you can use git_remote_create_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "git_repository *", + "name": "repository", + "comments": " The repository that should own the remote.\n Setting this to NULL results in a detached remote." + }, + { + "type": "const char *", + "name": "name", + "comments": " The remote's name.\n Setting this to NULL results in an in-memory/anonymous remote." + }, + { + "type": "const char *", + "name": "fetchspec", + "comments": " The fetchspec the remote should use. " + }, + { + "type": "unsigned int", + "name": "flags", + "comments": " Additional flags for the remote. See git_remote_create_flags. " + } + ], "used": { "returns": [], - "needs": [] + "needs": [ + "git_remote_create_options_init", + "git_remote_create_with_opts" + ] } } ], @@ -34948,47 +34041,96 @@ ], "type": "struct", "value": "git_remote_head", - "file": "net.h", - "line": 40, - "lineto": 50, + "file": "git2/net.h", + "line": 41, + "lineto": 51, "block": "int local\ngit_oid oid\ngit_oid loid\nchar * name\nchar * symref_target", "tdef": null, "description": " Description of a reference advertised by a remote server, given out\n on `ls` calls.", "comments": "", + "fields": [ + { "type": "int", "name": "local", "comments": "" }, + { "type": "git_oid", "name": "oid", "comments": "" }, + { "type": "git_oid", "name": "loid", "comments": "" }, + { "type": "char *", "name": "name", "comments": "" }, + { + "type": "char *", + "name": "symref_target", + "comments": " If the server send a symref mapping for this ref, this will\n point to the target." 
+ } + ], + "used": { "returns": [], "needs": ["git_headlist_cb", "git_remote_ls"] } + } + ], + [ + "git_remote_redirect_t", + { + "decl": [ + "GIT_REMOTE_REDIRECT_NONE", + "GIT_REMOTE_REDIRECT_INITIAL", + "GIT_REMOTE_REDIRECT_ALL" + ], + "type": "enum", + "file": "git2/remote.h", + "line": 49, + "lineto": 66, + "block": "GIT_REMOTE_REDIRECT_NONE\nGIT_REMOTE_REDIRECT_INITIAL\nGIT_REMOTE_REDIRECT_ALL", + "tdef": "typedef", + "description": " Remote redirection settings; whether redirects to another host\n are permitted. By default, git will follow a redirect on the\n initial request (`/info/refs`), but not subsequent requests.", + "comments": "", "fields": [ { "type": "int", - "name": "local", - "comments": "" + "name": "GIT_REMOTE_REDIRECT_NONE", + "comments": "

Do not follow any off-site redirects at any stage of\n the fetch or push.

\n", + "value": 1 }, { - "type": "git_oid", - "name": "oid", - "comments": "" + "type": "int", + "name": "GIT_REMOTE_REDIRECT_INITIAL", + "comments": "

Allow off-site redirects only upon the initial request.\n This is the default.

\n", + "value": 2 }, { - "type": "git_oid", - "name": "loid", - "comments": "" - }, + "type": "int", + "name": "GIT_REMOTE_REDIRECT_ALL", + "comments": "

Allow redirects at any stage in the fetch or push.

\n", + "value": 4 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_remote_update_flags", + { + "decl": [ + "GIT_REMOTE_UPDATE_FETCHHEAD", + "GIT_REMOTE_UPDATE_REPORT_UNCHANGED" + ], + "type": "enum", + "file": "git2/remote.h", + "line": 82, + "lineto": 88, + "block": "GIT_REMOTE_UPDATE_FETCHHEAD\nGIT_REMOTE_UPDATE_REPORT_UNCHANGED", + "tdef": "typedef", + "description": " How to handle reference updates.", + "comments": "", + "fields": [ { - "type": "char *", - "name": "name", - "comments": "" + "type": "int", + "name": "GIT_REMOTE_UPDATE_FETCHHEAD", + "comments": "", + "value": 1 }, { - "type": "char *", - "name": "symref_target", - "comments": " If the server send a symref mapping for this ref, this will\n point to the target." + "type": "int", + "name": "GIT_REMOTE_UPDATE_REPORT_UNCHANGED", + "comments": "", + "value": 2 } ], - "used": { - "returns": [], - "needs": [ - "git_headlist_cb", - "git_remote_ls" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -34997,9 +34139,9 @@ "decl": "git_repository", "type": "struct", "value": "git_repository", - "file": "types.h", - "line": 105, - "lineto": 105, + "file": "git2/types.h", + "line": 123, + "lineto": 123, "tdef": "typedef", "description": " Representation of an existing git repository,\n including all its object contents", "comments": "", @@ -35007,9 +34149,9 @@ "returns": [ "git_blob_owner", "git_commit_owner", - "git_filter_source_repo", "git_index_owner", "git_object_owner", + "git_patch_owner", "git_reference_owner", "git_remote_owner", "git_revwalk_repository", @@ -35022,22 +34164,30 @@ "git_annotated_commit_from_ref", "git_annotated_commit_from_revspec", "git_annotated_commit_lookup", + "git_apply", + "git_apply_to_tree", "git_attr_add_macro", "git_attr_cache_flush", "git_attr_foreach", + "git_attr_foreach_ext", "git_attr_get", + "git_attr_get_ext", "git_attr_get_many", - "git_blame_file", - "git_blob_create_frombuffer", - "git_blob_create_fromdisk", - 
"git_blob_create_fromstream", - "git_blob_create_fromworkdir", + "git_attr_get_many_ext", + "git_blob_create_from_buffer", + "git_blob_create_from_disk", + "git_blob_create_from_stream", + "git_blob_create_from_workdir", "git_blob_lookup", "git_blob_lookup_prefix", "git_branch_create", "git_branch_create_from_annotated", "git_branch_iterator_new", "git_branch_lookup", + "git_branch_remote_name", + "git_branch_upstream_merge", + "git_branch_upstream_name", + "git_branch_upstream_remote", "git_checkout_head", "git_checkout_index", "git_checkout_tree", @@ -35046,14 +34196,12 @@ "git_clone", "git_commit_create", "git_commit_create_buffer", - "git_commit_create_from_callback", - "git_commit_create_from_ids", + "git_commit_create_from_stage", "git_commit_create_v", "git_commit_create_with_signature", "git_commit_extract_signature", "git_commit_lookup", "git_commit_lookup_prefix", - "git_config_add_backend", "git_config_add_file_ondisk", "git_describe_workdir", "git_diff_commit_as_email", @@ -35065,17 +34213,19 @@ "git_diff_tree_to_workdir_with_index", "git_filter_list_apply_to_file", "git_filter_list_load", - "git_filter_list_new", + "git_filter_list_load_ext", "git_filter_list_stream_file", "git_graph_ahead_behind", "git_graph_descendant_of", + "git_graph_reachable_from_any", "git_ignore_add_rule", "git_ignore_clear_internal_rules", "git_ignore_path_is_ignored", "git_index_write_tree_to", - "git_mempack_dump", + "git_mailmap_from_repository", "git_merge", "git_merge_analysis", + "git_merge_analysis_for_ref", "git_merge_base", "git_merge_base_many", "git_merge_base_octopus", @@ -35084,21 +34234,12 @@ "git_merge_commits", "git_merge_file_from_index", "git_merge_trees", - "git_note_commit_create", - "git_note_commit_read", - "git_note_commit_remove", - "git_note_create", - "git_note_foreach", - "git_note_iterator_new", - "git_note_read", - "git_note_remove", "git_object_lookup", "git_object_lookup_prefix", "git_packbuilder_new", "git_pathspec_match_workdir", 
"git_rebase_init", "git_rebase_open", - "git_refdb_backend_fs", "git_refdb_new", "git_refdb_open", "git_reference_create", @@ -35133,7 +34274,7 @@ "git_remote_set_autotag", "git_remote_set_pushurl", "git_remote_set_url", - "git_repository__cleanup", + "git_repository_commit_parents", "git_repository_commondir", "git_repository_config", "git_repository_config_snapshot", @@ -35145,13 +34286,14 @@ "git_repository_hashfile", "git_repository_head", "git_repository_head_detached", + "git_repository_head_detached_for_worktree", "git_repository_head_for_worktree", "git_repository_head_unborn", "git_repository_ident", "git_repository_index", "git_repository_init", "git_repository_init_ext", - "git_repository_init_init_options", + "git_repository_init_options_init", "git_repository_is_bare", "git_repository_is_empty", "git_repository_is_shallow", @@ -35160,30 +34302,22 @@ "git_repository_mergehead_foreach", "git_repository_message", "git_repository_message_remove", - "git_repository_new", "git_repository_odb", + "git_repository_oid_type", "git_repository_open", "git_repository_open_bare", "git_repository_open_ext", "git_repository_open_from_worktree", "git_repository_path", "git_repository_refdb", - "git_repository_reinit_filesystem", - "git_repository_set_bare", - "git_repository_set_config", "git_repository_set_head", "git_repository_set_head_detached", "git_repository_set_head_detached_from_annotated", "git_repository_set_ident", - "git_repository_set_index", "git_repository_set_namespace", - "git_repository_set_odb", - "git_repository_set_refdb", "git_repository_set_workdir", "git_repository_state", "git_repository_state_cleanup", - "git_repository_submodule_cache_all", - "git_repository_submodule_cache_clear", "git_repository_workdir", "git_repository_wrap_odb", "git_reset", @@ -35196,16 +34330,20 @@ "git_revparse_single", "git_revwalk_new", "git_signature_default", + "git_signature_default_from_env", "git_stash_apply", "git_stash_drop", "git_stash_foreach", 
"git_stash_pop", + "git_stash_save", + "git_stash_save_with_opts", "git_status_file", "git_status_foreach", "git_status_foreach_ext", "git_status_list_new", "git_status_should_ignore", "git_submodule_add_setup", + "git_submodule_clone", "git_submodule_foreach", "git_submodule_lookup", "git_submodule_open", @@ -35219,7 +34357,7 @@ "git_submodule_status", "git_tag_annotation_create", "git_tag_create", - "git_tag_create_frombuffer", + "git_tag_create_from_buffer", "git_tag_create_lightweight", "git_tag_delete", "git_tag_foreach", @@ -35227,6 +34365,7 @@ "git_tag_list_match", "git_tag_lookup", "git_tag_lookup_prefix", + "git_transaction_new", "git_tree_create_updated", "git_tree_entry_to_object", "git_tree_lookup", @@ -35253,61 +34392,58 @@ "GIT_REPOSITORY_INIT_RELATIVE_GITLINK" ], "type": "enum", - "file": "repository.h", - "line": 232, - "lineto": 240, + "file": "git2/repository.h", + "line": 249, + "lineto": 295, "block": "GIT_REPOSITORY_INIT_BARE\nGIT_REPOSITORY_INIT_NO_REINIT\nGIT_REPOSITORY_INIT_NO_DOTGIT_DIR\nGIT_REPOSITORY_INIT_MKDIR\nGIT_REPOSITORY_INIT_MKPATH\nGIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE\nGIT_REPOSITORY_INIT_RELATIVE_GITLINK", "tdef": "typedef", "description": " Option flags for `git_repository_init_ext`.", - "comments": "

These flags configure extra behaviors to git_repository_init_ext. In every case, the default behavior is the zero value (i.e. flag is not set). Just OR the flag values together for the flags parameter when initializing a new repo. Details of individual values are:

\n\n\n", + "comments": "

These flags configure extra behaviors to git_repository_init_ext. In every case, the default behavior is the zero value (i.e. flag is not set). Just OR the flag values together for the flags parameter when initializing a new repo.

\n", "fields": [ { "type": "int", "name": "GIT_REPOSITORY_INIT_BARE", - "comments": "", + "comments": "

Create a bare repository with no working directory.

\n", "value": 1 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_NO_REINIT", - "comments": "", + "comments": "

Return an GIT_EEXISTS error if the repo_path appears to already be\n an git repository.

\n", "value": 2 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_NO_DOTGIT_DIR", - "comments": "", + "comments": "

Normally a "/.git/" will be appended to the repo path for\n non-bare repos (if it is not already there), but passing this flag\n prevents that behavior.

\n", "value": 4 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_MKDIR", - "comments": "", + "comments": "

Make the repo_path (and workdir_path) as needed. Init is always willing\n to create the ".git" directory even without this flag. This flag tells\n init to create the trailing component of the repo and workdir paths\n as needed.

\n", "value": 8 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_MKPATH", - "comments": "", + "comments": "

Recursively make all components of the repo and workdir paths as\n necessary.

\n", "value": 16 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE", - "comments": "", + "comments": "

libgit2 normally uses internal templates to initialize a new repo.\n This flags enables external templates, looking the "template_path" from\n the options if set, or the init.templatedir global config if not,\n or falling back on "/usr/share/git-core/templates" if it exists.

\n", "value": 32 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_RELATIVE_GITLINK", - "comments": "", + "comments": "

If an alternate workdir is specified, use relative paths for the gitdir\n and core.worktree.

\n", "value": 64 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -35319,37 +34455,34 @@ "GIT_REPOSITORY_INIT_SHARED_ALL" ], "type": "enum", - "file": "repository.h", - "line": 255, - "lineto": 259, + "file": "git2/repository.h", + "line": 304, + "lineto": 320, "block": "GIT_REPOSITORY_INIT_SHARED_UMASK\nGIT_REPOSITORY_INIT_SHARED_GROUP\nGIT_REPOSITORY_INIT_SHARED_ALL", "tdef": "typedef", "description": " Mode options for `git_repository_init_ext`.", - "comments": "

Set the mode field of the git_repository_init_options structure either to the custom mode that you would like, or to one of the following modes:

\n\n\n", + "comments": "

Set the mode field of the git_repository_init_options structure either to the custom mode that you would like, or to one of the defined modes.

\n", "fields": [ { "type": "int", "name": "GIT_REPOSITORY_INIT_SHARED_UMASK", - "comments": "", + "comments": "

Use permissions configured by umask - the default.

\n", "value": 0 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_SHARED_GROUP", - "comments": "", + "comments": "

Use "--shared=group" behavior, chmod'ing the new repo to be group\n writable and "g+sx" for sticky group assignment.

\n", "value": 1533 }, { "type": "int", "name": "GIT_REPOSITORY_INIT_SHARED_ALL", - "comments": "", + "comments": "

Use "--shared=all" behavior, adding world readability.

\n", "value": 1535 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -35367,60 +34500,56 @@ ], "type": "struct", "value": "git_repository_init_options", - "file": "repository.h", - "line": 289, - "lineto": 298, + "file": "git2/repository.h", + "line": 328, + "lineto": 387, "block": "unsigned int version\nuint32_t flags\nuint32_t mode\nconst char * workdir_path\nconst char * description\nconst char * template_path\nconst char * initial_head\nconst char * origin_url", "tdef": "typedef", "description": " Extended options structure for `git_repository_init_ext`.", - "comments": "

This contains extra options for git_repository_init_ext that enable additional initialization features. The fields are:

\n\n\n", + "comments": "

This contains extra options for git_repository_init_ext that enable additional initialization features.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "uint32_t", "name": "flags", - "comments": "" + "comments": " Combination of GIT_REPOSITORY_INIT flags above." }, { "type": "uint32_t", "name": "mode", - "comments": "" + "comments": " Set to one of the standard GIT_REPOSITORY_INIT_SHARED_... constants\n above, or to a custom value that you would like." }, { "type": "const char *", "name": "workdir_path", - "comments": "" + "comments": " The path to the working dir or NULL for default (i.e. repo_path parent\n on non-bare repos). IF THIS IS RELATIVE PATH, IT WILL BE EVALUATED\n RELATIVE TO THE REPO_PATH. If this is not the \"natural\" working\n directory, a .git gitlink file will be created here linking to the\n repo_path." }, { "type": "const char *", "name": "description", - "comments": "" + "comments": " If set, this will be used to initialize the \"description\" file in the\n repository, instead of using the template content." }, { "type": "const char *", "name": "template_path", - "comments": "" + "comments": " When GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE is set, this contains\n the path to use for the template directory. If this is NULL, the config\n or default directory options will be used instead." }, { "type": "const char *", "name": "initial_head", - "comments": "" + "comments": " The name of the head to point HEAD at. If NULL, then this will be\n treated as \"master\" and the HEAD ref will be set to \"refs/heads/master\".\n If this begins with \"refs/\" it will be used verbatim;\n otherwise \"refs/heads/\" will be prefixed." }, { "type": "const char *", "name": "origin_url", - "comments": "" + "comments": " If this is non-NULL, then after the rest of the repository\n initialization is completed, an \"origin\" remote will be added\n pointing to this URL." 
} ], "used": { "returns": [], "needs": [ "git_repository_init_ext", - "git_repository_init_init_options" + "git_repository_init_options_init" ] } } @@ -35442,13 +34571,15 @@ "GIT_REPOSITORY_ITEM_HOOKS", "GIT_REPOSITORY_ITEM_LOGS", "GIT_REPOSITORY_ITEM_MODULES", - "GIT_REPOSITORY_ITEM_WORKTREES" + "GIT_REPOSITORY_ITEM_WORKTREES", + "GIT_REPOSITORY_ITEM_WORKTREE_CONFIG", + "GIT_REPOSITORY_ITEM__LAST" ], "type": "enum", - "file": "repository.h", - "line": 412, - "lineto": 427, - "block": "GIT_REPOSITORY_ITEM_GITDIR\nGIT_REPOSITORY_ITEM_WORKDIR\nGIT_REPOSITORY_ITEM_COMMONDIR\nGIT_REPOSITORY_ITEM_INDEX\nGIT_REPOSITORY_ITEM_OBJECTS\nGIT_REPOSITORY_ITEM_REFS\nGIT_REPOSITORY_ITEM_PACKED_REFS\nGIT_REPOSITORY_ITEM_REMOTES\nGIT_REPOSITORY_ITEM_CONFIG\nGIT_REPOSITORY_ITEM_INFO\nGIT_REPOSITORY_ITEM_HOOKS\nGIT_REPOSITORY_ITEM_LOGS\nGIT_REPOSITORY_ITEM_MODULES\nGIT_REPOSITORY_ITEM_WORKTREES", + "file": "git2/repository.h", + "line": 512, + "lineto": 529, + "block": "GIT_REPOSITORY_ITEM_GITDIR\nGIT_REPOSITORY_ITEM_WORKDIR\nGIT_REPOSITORY_ITEM_COMMONDIR\nGIT_REPOSITORY_ITEM_INDEX\nGIT_REPOSITORY_ITEM_OBJECTS\nGIT_REPOSITORY_ITEM_REFS\nGIT_REPOSITORY_ITEM_PACKED_REFS\nGIT_REPOSITORY_ITEM_REMOTES\nGIT_REPOSITORY_ITEM_CONFIG\nGIT_REPOSITORY_ITEM_INFO\nGIT_REPOSITORY_ITEM_HOOKS\nGIT_REPOSITORY_ITEM_LOGS\nGIT_REPOSITORY_ITEM_MODULES\nGIT_REPOSITORY_ITEM_WORKTREES\nGIT_REPOSITORY_ITEM_WORKTREE_CONFIG\nGIT_REPOSITORY_ITEM__LAST", "tdef": "typedef", "description": " List of items which belong to the git repository layout", "comments": "", @@ -35536,14 +34667,21 @@ "name": "GIT_REPOSITORY_ITEM_WORKTREES", "comments": "", "value": 13 + }, + { + "type": "int", + "name": "GIT_REPOSITORY_ITEM_WORKTREE_CONFIG", + "comments": "", + "value": 14 + }, + { + "type": "int", + "name": "GIT_REPOSITORY_ITEM__LAST", + "comments": "", + "value": 15 } ], - "used": { - "returns": [], - "needs": [ - "git_repository_item_path" - ] - } + "used": { "returns": [], "needs": ["git_repository_item_path"] } } ], [ @@ 
-35557,49 +34695,46 @@ "GIT_REPOSITORY_OPEN_FROM_ENV" ], "type": "enum", - "file": "repository.h", - "line": 126, - "lineto": 132, + "file": "git2/repository.h", + "line": 110, + "lineto": 157, "block": "GIT_REPOSITORY_OPEN_NO_SEARCH\nGIT_REPOSITORY_OPEN_CROSS_FS\nGIT_REPOSITORY_OPEN_BARE\nGIT_REPOSITORY_OPEN_NO_DOTGIT\nGIT_REPOSITORY_OPEN_FROM_ENV", "tdef": "typedef", "description": " Option flags for `git_repository_open_ext`.", - "comments": "\n", + "comments": "", "fields": [ { "type": "int", "name": "GIT_REPOSITORY_OPEN_NO_SEARCH", - "comments": "", + "comments": "

Only open the repository if it can be immediately found in the\n start_path. Do not walk up from the start_path looking at parent\n directories.

\n", "value": 1 }, { "type": "int", "name": "GIT_REPOSITORY_OPEN_CROSS_FS", - "comments": "", + "comments": "

Unless this flag is set, open will not continue searching across\n filesystem boundaries (i.e. when st_dev changes from the stat\n system call). For example, searching in a user's home directory at\n "/home/user/source/" will not return "/.git/" as the found repo if\n "/" is a different filesystem than "/home".

\n", "value": 2 }, { "type": "int", "name": "GIT_REPOSITORY_OPEN_BARE", - "comments": "", + "comments": "

Open repository as a bare repo regardless of core.bare config, and\n defer loading config file for faster setup.\n Unlike git_repository_open_bare, this can follow gitlinks.

\n", "value": 4 }, { "type": "int", "name": "GIT_REPOSITORY_OPEN_NO_DOTGIT", - "comments": "", + "comments": "

Do not check for a repository by appending /.git to the start_path;\n only open the repository if start_path itself points to the git\n directory.

\n", "value": 8 }, { "type": "int", "name": "GIT_REPOSITORY_OPEN_FROM_ENV", - "comments": "", + "comments": "

Find and open a git repository, respecting the environment variables\n used by the git command-line tools.\n If set, git_repository_open_ext will ignore the other flags and\n the ceiling_dirs argument, and will allow a NULL path to use\n GIT_DIR or search from the current directory.\n The search for a repository will respect $GIT_CEILING_DIRECTORIES and\n $GIT_DISCOVERY_ACROSS_FILESYSTEM. The opened repository will\n respect $GIT_INDEX_FILE, $GIT_NAMESPACE, $GIT_OBJECT_DIRECTORY, and\n $GIT_ALTERNATE_OBJECT_DIRECTORIES.\n In the future, this flag will also cause git_repository_open_ext\n to respect $GIT_WORK_TREE and $GIT_COMMON_DIR; currently,\n git_repository_open_ext with this flag will error out if either\n $GIT_WORK_TREE or $GIT_COMMON_DIR is set.

\n", "value": 16 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -35620,9 +34755,9 @@ "GIT_REPOSITORY_STATE_APPLY_MAILBOX_OR_REBASE" ], "type": "enum", - "file": "repository.h", - "line": 784, - "lineto": 797, + "file": "git2/repository.h", + "line": 915, + "lineto": 928, "block": "GIT_REPOSITORY_STATE_NONE\nGIT_REPOSITORY_STATE_MERGE\nGIT_REPOSITORY_STATE_REVERT\nGIT_REPOSITORY_STATE_REVERT_SEQUENCE\nGIT_REPOSITORY_STATE_CHERRYPICK\nGIT_REPOSITORY_STATE_CHERRYPICK_SEQUENCE\nGIT_REPOSITORY_STATE_BISECT\nGIT_REPOSITORY_STATE_REBASE\nGIT_REPOSITORY_STATE_REBASE_INTERACTIVE\nGIT_REPOSITORY_STATE_REBASE_MERGE\nGIT_REPOSITORY_STATE_APPLY_MAILBOX\nGIT_REPOSITORY_STATE_APPLY_MAILBOX_OR_REBASE", "tdef": "typedef", "description": " Repository state", @@ -35701,22 +34836,15 @@ "value": 11 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_reset_t", { - "decl": [ - "GIT_RESET_SOFT", - "GIT_RESET_MIXED", - "GIT_RESET_HARD" - ], + "decl": ["GIT_RESET_SOFT", "GIT_RESET_MIXED", "GIT_RESET_HARD"], "type": "enum", - "file": "reset.h", + "file": "git2/reset.h", "line": 26, "lineto": 30, "block": "GIT_RESET_SOFT\nGIT_RESET_MIXED\nGIT_RESET_HARD", @@ -35745,10 +34873,7 @@ ], "used": { "returns": [], - "needs": [ - "git_reset", - "git_reset_from_annotated" - ] + "needs": ["git_reset", "git_reset_from_annotated"] } } ], @@ -35763,7 +34888,7 @@ ], "type": "struct", "value": "git_revert_options", - "file": "revert.h", + "file": "git2/revert.h", "line": 26, "lineto": 34, "block": "unsigned int version\nunsigned int mainline\ngit_merge_options merge_opts\ngit_checkout_options checkout_opts", @@ -35771,11 +34896,7 @@ "description": " Options for revert", "comments": "", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "unsigned int", "name": "mainline", @@ -35786,74 +34907,25 
@@ "name": "merge_opts", "comments": " Options for the merging " }, - { - "type": "git_checkout_options", - "name": "checkout_opts", - "comments": " Options for the checkout " - } - ], - "used": { - "returns": [], - "needs": [ - "git_revert", - "git_revert_init_options" - ] - } - } - ], - [ - "git_revparse_mode_t", - { - "decl": [ - "GIT_REVPARSE_SINGLE", - "GIT_REVPARSE_RANGE", - "GIT_REVPARSE_MERGE_BASE" - ], - "type": "enum", - "file": "revparse.h", - "line": 71, - "lineto": 78, - "block": "GIT_REVPARSE_SINGLE\nGIT_REVPARSE_RANGE\nGIT_REVPARSE_MERGE_BASE", - "tdef": "typedef", - "description": " Revparse flags. These indicate the intended behavior of the spec passed to\n git_revparse.", - "comments": "", - "fields": [ - { - "type": "int", - "name": "GIT_REVPARSE_SINGLE", - "comments": "

The spec targeted a single object.

\n", - "value": 1 - }, - { - "type": "int", - "name": "GIT_REVPARSE_RANGE", - "comments": "

The spec targeted a range of commits.

\n", - "value": 2 - }, - { - "type": "int", - "name": "GIT_REVPARSE_MERGE_BASE", - "comments": "

The spec used the '...' operator, which invokes special semantics.

\n", - "value": 4 + { + "type": "git_checkout_options", + "name": "checkout_opts", + "comments": " Options for the checkout " } ], "used": { "returns": [], - "needs": [] + "needs": ["git_revert", "git_revert_options_init"] } } ], [ "git_revspec", { - "decl": [ - "git_object * from", - "git_object * to", - "unsigned int flags" - ], + "decl": ["git_object * from", "git_object * to", "unsigned int flags"], "type": "struct", "value": "git_revspec", - "file": "revparse.h", + "file": "git2/revparse.h", "line": 83, "lineto": 90, "block": "git_object * from\ngit_object * to\nunsigned int flags", @@ -35874,15 +34946,49 @@ { "type": "unsigned int", "name": "flags", - "comments": " The intent of the revspec (i.e. `git_revparse_mode_t` flags) " + "comments": " The intent of the revspec (i.e. `git_revspec_mode_t` flags) " } ], - "used": { - "returns": [], - "needs": [ - "git_revparse" - ] - } + "used": { "returns": [], "needs": ["git_revparse"] } + } + ], + [ + "git_revspec_t", + { + "decl": [ + "GIT_REVSPEC_SINGLE", + "GIT_REVSPEC_RANGE", + "GIT_REVSPEC_MERGE_BASE" + ], + "type": "enum", + "file": "git2/revparse.h", + "line": 71, + "lineto": 78, + "block": "GIT_REVSPEC_SINGLE\nGIT_REVSPEC_RANGE\nGIT_REVSPEC_MERGE_BASE", + "tdef": "typedef", + "description": " Revparse flags. These indicate the intended behavior of the spec passed to\n git_revparse.", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_REVSPEC_SINGLE", + "comments": "

The spec targeted a single object.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_REVSPEC_RANGE", + "comments": "

The spec targeted a range of commits.

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_REVSPEC_MERGE_BASE", + "comments": "

The spec used the '...' operator, which invokes special semantics.

\n", + "value": 4 + } + ], + "used": { "returns": [], "needs": [] } } ], [ @@ -35891,9 +34997,9 @@ "decl": "git_revwalk", "type": "struct", "value": "git_revwalk", - "file": "types.h", - "line": 114, - "lineto": 114, + "file": "git2/types.h", + "line": 132, + "lineto": 132, "tdef": "typedef", "description": " Representation of an in-progress walk through the commits in a repo ", "comments": "", @@ -35925,16 +35031,12 @@ [ "git_signature", { - "decl": [ - "char * name", - "char * email", - "git_time when" - ], + "decl": ["char * name", "char * email", "git_time when"], "type": "struct", "value": "git_signature", - "file": "types.h", - "line": 166, - "lineto": 170, + "file": "git2/types.h", + "line": 187, + "lineto": 191, "block": "char * name\nchar * email\ngit_time when", "tdef": "typedef", "description": " An action signature (e.g. for committers, taggers, etc) ", @@ -35960,75 +35062,81 @@ "returns": [ "git_commit_author", "git_commit_committer", - "git_note_author", - "git_note_committer", "git_reflog_entry_committer", "git_tag_tagger" ], "needs": [ "git_commit_amend", + "git_commit_author_with_mailmap", + "git_commit_committer_with_mailmap", "git_commit_create", "git_commit_create_buffer", - "git_commit_create_from_callback", - "git_commit_create_from_ids", + "git_commit_create_cb", "git_commit_create_v", - "git_note_commit_create", - "git_note_commit_remove", - "git_note_create", - "git_note_remove", + "git_mailmap_resolve_signature", "git_rebase_commit", "git_rebase_finish", "git_reflog_append", "git_signature_default", + "git_signature_default_from_env", "git_signature_dup", "git_signature_free", "git_signature_from_buffer", "git_signature_new", "git_signature_now", + "git_stash_save", "git_tag_annotation_create", - "git_tag_create" + "git_tag_create", + "git_transaction_set_symbolic_target", + "git_transaction_set_target" ] } } ], [ - "git_smart_subtransport_definition", + "git_smart_service_t", { "decl": [ - "git_smart_subtransport_cb callback", - "unsigned 
int rpc", - "void * param" + "GIT_SERVICE_UPLOADPACK_LS", + "GIT_SERVICE_UPLOADPACK", + "GIT_SERVICE_RECEIVEPACK_LS", + "GIT_SERVICE_RECEIVEPACK" ], - "type": "struct", - "value": "git_smart_subtransport_definition", - "file": "sys/transport.h", - "line": 336, - "lineto": 349, - "block": "git_smart_subtransport_cb callback\nunsigned int rpc\nvoid * param", + "type": "enum", + "file": "git2/sys/transport.h", + "line": 323, + "lineto": 328, + "block": "GIT_SERVICE_UPLOADPACK_LS\nGIT_SERVICE_UPLOADPACK\nGIT_SERVICE_RECEIVEPACK_LS\nGIT_SERVICE_RECEIVEPACK", "tdef": "typedef", - "description": " Definition for a \"subtransport\"", - "comments": "

This is used to let the smart protocol code know about the protocol which you are implementing.

\n", + "description": " Actions that the smart transport can ask a subtransport to perform ", + "comments": "", "fields": [ { - "type": "git_smart_subtransport_cb", - "name": "callback", - "comments": " The function to use to create the git_smart_subtransport " + "type": "int", + "name": "GIT_SERVICE_UPLOADPACK_LS", + "comments": "", + "value": 1 }, { - "type": "unsigned int", - "name": "rpc", - "comments": " True if the protocol is stateless; false otherwise. For example,\n http:// is stateless, but git:// is not." + "type": "int", + "name": "GIT_SERVICE_UPLOADPACK", + "comments": "", + "value": 2 }, { - "type": "void *", - "name": "param", - "comments": " Param of the callback" + "type": "int", + "name": "GIT_SERVICE_RECEIVEPACK_LS", + "comments": "", + "value": 3 + }, + { + "type": "int", + "name": "GIT_SERVICE_RECEIVEPACK", + "comments": "", + "value": 4 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -36041,7 +35149,7 @@ "GIT_SORT_REVERSE" ], "type": "enum", - "file": "revwalk.h", + "file": "git2/revwalk.h", "line": 26, "lineto": 53, "block": "GIT_SORT_NONE\nGIT_SORT_TOPOLOGICAL\nGIT_SORT_TIME\nGIT_SORT_REVERSE", @@ -36052,13 +35160,13 @@ { "type": "int", "name": "GIT_SORT_NONE", - "comments": "

Sort the output with the same default time-order method from git.\n This is the default sorting for new walkers.

\n", + "comments": "

Sort the output with the same default method from git: reverse\n chronological order. This is the default sorting for new walkers.

\n", "value": 0 }, { "type": "int", "name": "GIT_SORT_TOPOLOGICAL", - "comments": "

Sort the repository contents in topological order (parents before\n children); this sorting mode can be combined with time sorting to\n produce git's "time-order".

\n", + "comments": "

Sort the repository contents in topological order (no parents before\n all of its children are shown); this sorting mode can be combined\n with time sorting to produce git's --date-order`.

\n", "value": 1 }, { @@ -36074,23 +35182,17 @@ "value": 4 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_stash_apply_flags", { - "decl": [ - "GIT_STASH_APPLY_DEFAULT", - "GIT_STASH_APPLY_REINSTATE_INDEX" - ], + "decl": ["GIT_STASH_APPLY_DEFAULT", "GIT_STASH_APPLY_REINSTATE_INDEX"], "type": "enum", - "file": "stash.h", - "line": 74, - "lineto": 81, + "file": "git2/stash.h", + "line": 137, + "lineto": 144, "block": "GIT_STASH_APPLY_DEFAULT\nGIT_STASH_APPLY_REINSTATE_INDEX", "tdef": "typedef", "description": " Stash application flags. ", @@ -36109,12 +35211,131 @@ "value": 1 } ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_stash_apply_options", + { + "decl": [ + "unsigned int version", + "uint32_t flags", + "git_checkout_options checkout_options", + "git_stash_apply_progress_cb progress_cb", + "void * progress_payload" + ], + "type": "struct", + "value": "git_stash_apply_options", + "file": "git2/stash.h", + "line": 192, + "lineto": 204, + "block": "unsigned int version\nuint32_t flags\ngit_checkout_options checkout_options\ngit_stash_apply_progress_cb progress_cb\nvoid * progress_payload", + "tdef": "typedef", + "description": " Stash application options structure", + "comments": "

Initialize with GIT_STASH_APPLY_OPTIONS_INIT. Alternatively, you can use git_stash_apply_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "uint32_t", + "name": "flags", + "comments": " See `git_stash_apply_flags`, above. " + }, + { + "type": "git_checkout_options", + "name": "checkout_options", + "comments": " Options to use when writing files to the working directory. " + }, + { + "type": "git_stash_apply_progress_cb", + "name": "progress_cb", + "comments": " Optional callback to notify the consumer of application progress. " + }, + { "type": "void *", "name": "progress_payload", "comments": "" } + ], "used": { "returns": [], - "needs": [] + "needs": [ + "git_stash_apply", + "git_stash_apply_options_init", + "git_stash_pop" + ] } } ], + [ + "git_stash_apply_progress_t", + { + "decl": [ + "GIT_STASH_APPLY_PROGRESS_NONE", + "GIT_STASH_APPLY_PROGRESS_LOADING_STASH", + "GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX", + "GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED", + "GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED", + "GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED", + "GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED", + "GIT_STASH_APPLY_PROGRESS_DONE" + ], + "type": "enum", + "file": "git2/stash.h", + "line": 147, + "lineto": 170, + "block": "GIT_STASH_APPLY_PROGRESS_NONE\nGIT_STASH_APPLY_PROGRESS_LOADING_STASH\nGIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX\nGIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED\nGIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED\nGIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED\nGIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED\nGIT_STASH_APPLY_PROGRESS_DONE", + "tdef": "typedef", + "description": " Stash apply progression states ", + "comments": "", + "fields": [ + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_NONE", + "comments": "", + "value": 0 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_LOADING_STASH", + "comments": "

Loading the stashed data from the object database.

\n", + "value": 1 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX", + "comments": "

The stored index is being analyzed.

\n", + "value": 2 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED", + "comments": "

The modified files are being analyzed.

\n", + "value": 3 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED", + "comments": "

The untracked and ignored files are being analyzed.

\n", + "value": 4 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED", + "comments": "

The untracked files are being written to disk.

\n", + "value": 5 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED", + "comments": "

The modified files are being written to disk.

\n", + "value": 6 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_DONE", + "comments": "

The stash was applied successfully.

\n", + "value": 7 + } + ], + "used": { "returns": [], "needs": ["git_stash_apply_progress_cb"] } + } + ], [ "git_stash_flags", { @@ -36122,13 +35343,14 @@ "GIT_STASH_DEFAULT", "GIT_STASH_KEEP_INDEX", "GIT_STASH_INCLUDE_UNTRACKED", - "GIT_STASH_INCLUDE_IGNORED" + "GIT_STASH_INCLUDE_IGNORED", + "GIT_STASH_KEEP_ALL" ], "type": "enum", - "file": "stash.h", - "line": 24, - "lineto": 47, - "block": "GIT_STASH_DEFAULT\nGIT_STASH_KEEP_INDEX\nGIT_STASH_INCLUDE_UNTRACKED\nGIT_STASH_INCLUDE_IGNORED", + "file": "git2/stash.h", + "line": 30, + "lineto": 58, + "block": "GIT_STASH_DEFAULT\nGIT_STASH_KEEP_INDEX\nGIT_STASH_INCLUDE_UNTRACKED\nGIT_STASH_INCLUDE_IGNORED\nGIT_STASH_KEEP_ALL", "tdef": "typedef", "description": " Stash flags", "comments": "", @@ -36156,23 +35378,107 @@ "name": "GIT_STASH_INCLUDE_IGNORED", "comments": "

All ignored files are also stashed and then cleaned up from\n the working directory

\n", "value": 4 + }, + { + "type": "int", + "name": "GIT_STASH_KEEP_ALL", + "comments": "

All changes in the index and working directory are left intact

\n", + "value": 8 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_stash_save_options", + { + "decl": [ + "unsigned int version", + "uint32_t flags", + "const git_signature * stasher", + "const char * message", + "git_strarray paths" + ], + "type": "struct", + "value": "git_stash_save_options", + "file": "git2/stash.h", + "line": 86, + "lineto": 100, + "block": "unsigned int version\nuint32_t flags\nconst git_signature * stasher\nconst char * message\ngit_strarray paths", + "tdef": "typedef", + "description": " Stash save options structure", + "comments": "

Initialize with GIT_STASH_SAVE_OPTIONS_INIT. Alternatively, you can use git_stash_save_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "uint32_t", + "name": "flags", + "comments": " Flags to control the stashing process. (see GIT_STASH_* above) " + }, + { + "type": "const git_signature *", + "name": "stasher", + "comments": " The identity of the person performing the stashing. " + }, + { + "type": "const char *", + "name": "message", + "comments": " Optional description along with the stashed state. " + }, + { + "type": "git_strarray", + "name": "paths", + "comments": " Optional paths that control which files are stashed. " } ], "used": { "returns": [], - "needs": [] + "needs": ["git_stash_save_options_init", "git_stash_save_with_opts"] } } ], + [ + "git_status_entry", + { + "decl": [ + "git_status_t status", + "git_diff_delta * head_to_index", + "git_diff_delta * index_to_workdir" + ], + "type": "struct", + "value": "git_status_entry", + "file": "git2/status.h", + "line": 298, + "lineto": 302, + "block": "git_status_t status\ngit_diff_delta * head_to_index\ngit_diff_delta * index_to_workdir", + "tdef": "typedef", + "description": " A status entry, providing the differences between the file as it exists\n in HEAD and the index, and providing the differences between the index\n and the working directory.", + "comments": "

The status value provides the status flags for this file.

\n\n

The head_to_index value provides detailed information about the differences between the file in HEAD and the file in the index.

\n\n

The index_to_workdir value provides detailed information about the differences between the file in the index and the file in the working directory.

\n", + "fields": [ + { "type": "git_status_t", "name": "status", "comments": "" }, + { + "type": "git_diff_delta *", + "name": "head_to_index", + "comments": "" + }, + { + "type": "git_diff_delta *", + "name": "index_to_workdir", + "comments": "" + } + ], + "used": { "returns": ["git_status_byindex"], "needs": [] } + } + ], [ "git_status_list", { "decl": "git_status_list", "type": "struct", "value": "git_status_list", - "file": "types.h", - "line": 188, - "lineto": 188, + "file": "git2/types.h", + "line": 217, + "lineto": 217, "tdef": "typedef", "description": " Representation of a status collection ", "comments": "", @@ -36182,7 +35488,6 @@ "git_status_byindex", "git_status_list_entrycount", "git_status_list_free", - "git_status_list_get_perfdata", "git_status_list_new" ] } @@ -36210,114 +35515,173 @@ "GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED" ], "type": "enum", - "file": "status.h", - "line": 137, - "lineto": 154, + "file": "git2/status.h", + "line": 100, + "lineto": 207, "block": "GIT_STATUS_OPT_INCLUDE_UNTRACKED\nGIT_STATUS_OPT_INCLUDE_IGNORED\nGIT_STATUS_OPT_INCLUDE_UNMODIFIED\nGIT_STATUS_OPT_EXCLUDE_SUBMODULES\nGIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS\nGIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH\nGIT_STATUS_OPT_RECURSE_IGNORED_DIRS\nGIT_STATUS_OPT_RENAMES_HEAD_TO_INDEX\nGIT_STATUS_OPT_RENAMES_INDEX_TO_WORKDIR\nGIT_STATUS_OPT_SORT_CASE_SENSITIVELY\nGIT_STATUS_OPT_SORT_CASE_INSENSITIVELY\nGIT_STATUS_OPT_RENAMES_FROM_REWRITES\nGIT_STATUS_OPT_NO_REFRESH\nGIT_STATUS_OPT_UPDATE_INDEX\nGIT_STATUS_OPT_INCLUDE_UNREADABLE\nGIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED", "tdef": "typedef", "description": " Flags to control status callbacks", - "comments": "\n\n

Calling git_status_foreach() is like calling the extended version with: GIT_STATUS_OPT_INCLUDE_IGNORED, GIT_STATUS_OPT_INCLUDE_UNTRACKED, and GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS. Those options are bundled together as GIT_STATUS_OPT_DEFAULTS if you want them as a baseline.

\n", + "comments": "

Calling git_status_foreach() is like calling the extended version with: GIT_STATUS_OPT_INCLUDE_IGNORED, GIT_STATUS_OPT_INCLUDE_UNTRACKED, and GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS. Those options are bundled together as GIT_STATUS_OPT_DEFAULTS if you want them as a baseline.

\n", "fields": [ { "type": "int", "name": "GIT_STATUS_OPT_INCLUDE_UNTRACKED", - "comments": "", + "comments": "

Says that callbacks should be made on untracked files.\n These will only be made if the workdir files are included in the status\n "show" option.

\n", "value": 1 }, { "type": "int", "name": "GIT_STATUS_OPT_INCLUDE_IGNORED", - "comments": "", + "comments": "

Says that ignored files get callbacks.\n Again, these callbacks will only be made if the workdir files are\n included in the status "show" option.

\n", "value": 2 }, { "type": "int", "name": "GIT_STATUS_OPT_INCLUDE_UNMODIFIED", - "comments": "", + "comments": "

Indicates that callback should be made even on unmodified files.

\n", "value": 4 }, { "type": "int", "name": "GIT_STATUS_OPT_EXCLUDE_SUBMODULES", - "comments": "", + "comments": "

Indicates that submodules should be skipped.\n This only applies if there are no pending typechanges to the submodule\n (either from or to another type).

\n", "value": 8 }, { "type": "int", "name": "GIT_STATUS_OPT_RECURSE_UNTRACKED_DIRS", - "comments": "", + "comments": "

Indicates that all files in untracked directories should be included.\n Normally if an entire directory is new, then just the top-level\n directory is included (with a trailing slash on the entry name).\n This flag says to include all of the individual files in the directory\n instead.

\n", "value": 16 }, { "type": "int", "name": "GIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH", - "comments": "", + "comments": "

Indicates that the given path should be treated as a literal path,\n and not as a pathspec pattern.

\n", "value": 32 }, { "type": "int", "name": "GIT_STATUS_OPT_RECURSE_IGNORED_DIRS", - "comments": "", + "comments": "

Indicates that the contents of ignored directories should be included\n in the status. This is like doing git ls-files -o -i --exclude-standard\n with core git.

\n", "value": 64 }, { "type": "int", "name": "GIT_STATUS_OPT_RENAMES_HEAD_TO_INDEX", - "comments": "", + "comments": "

Indicates that rename detection should be processed between the head and\n the index and enables the GIT_STATUS_INDEX_RENAMED as a possible status\n flag.

\n", "value": 128 }, { "type": "int", "name": "GIT_STATUS_OPT_RENAMES_INDEX_TO_WORKDIR", - "comments": "", + "comments": "

Indicates that rename detection should be run between the index and the\n working directory and enabled GIT_STATUS_WT_RENAMED as a possible status\n flag.

\n", "value": 256 }, { "type": "int", "name": "GIT_STATUS_OPT_SORT_CASE_SENSITIVELY", - "comments": "", + "comments": "

Overrides the native case sensitivity for the file system and forces\n the output to be in case-sensitive order.

\n", "value": 512 }, { "type": "int", "name": "GIT_STATUS_OPT_SORT_CASE_INSENSITIVELY", - "comments": "", + "comments": "

Overrides the native case sensitivity for the file system and forces\n the output to be in case-insensitive order.

\n", "value": 1024 }, { - "type": "int", - "name": "GIT_STATUS_OPT_RENAMES_FROM_REWRITES", - "comments": "", - "value": 2048 + "type": "int", + "name": "GIT_STATUS_OPT_RENAMES_FROM_REWRITES", + "comments": "

Iindicates that rename detection should include rewritten files.

\n", + "value": 2048 + }, + { + "type": "int", + "name": "GIT_STATUS_OPT_NO_REFRESH", + "comments": "

Bypasses the default status behavior of doing a "soft" index reload\n (i.e. reloading the index data if the file on disk has been modified\n outside libgit2).

\n", + "value": 4096 + }, + { + "type": "int", + "name": "GIT_STATUS_OPT_UPDATE_INDEX", + "comments": "

Tells libgit2 to refresh the stat cache in the index for files that are\n unchanged but have out of date stat einformation in the index.\n It will result in less work being done on subsequent calls to get status.\n This is mutually exclusive with the NO_REFRESH option.

\n", + "value": 8192 + }, + { + "type": "int", + "name": "GIT_STATUS_OPT_INCLUDE_UNREADABLE", + "comments": "

Normally files that cannot be opened or read are ignored as\n these are often transient files; this option will return\n unreadable files as GIT_STATUS_WT_UNREADABLE.

\n", + "value": 16384 + }, + { + "type": "int", + "name": "GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED", + "comments": "

Unreadable files will be detected and given the status\n untracked instead of unreadable.

\n", + "value": 32768 + } + ], + "used": { "returns": [], "needs": [] } + } + ], + [ + "git_status_options", + { + "decl": [ + "unsigned int version", + "git_status_show_t show", + "unsigned int flags", + "git_strarray pathspec", + "git_tree * baseline", + "uint16_t rename_threshold" + ], + "type": "struct", + "value": "git_status_options", + "file": "git2/status.h", + "line": 222, + "lineto": 262, + "block": "unsigned int version\ngit_status_show_t show\nunsigned int flags\ngit_strarray pathspec\ngit_tree * baseline\nuint16_t rename_threshold", + "tdef": "typedef", + "description": " Options to control how `git_status_foreach_ext()` will issue callbacks.", + "comments": "

Initialize with GIT_STATUS_OPTIONS_INIT. Alternatively, you can use git_status_options_init.

\n", + "fields": [ + { + "type": "unsigned int", + "name": "version", + "comments": " The struct version; pass `GIT_STATUS_OPTIONS_VERSION`." + }, + { + "type": "git_status_show_t", + "name": "show", + "comments": " The `show` value is one of the `git_status_show_t` constants that\n control which files to scan and in what order. The default is\n `GIT_STATUS_SHOW_INDEX_AND_WORKDIR`." }, { - "type": "int", - "name": "GIT_STATUS_OPT_NO_REFRESH", - "comments": "", - "value": 4096 + "type": "unsigned int", + "name": "flags", + "comments": " The `flags` value is an OR'ed combination of the\n `git_status_opt_t` values above. The default is\n `GIT_STATUS_OPT_DEFAULTS`, which matches git's default\n behavior." }, { - "type": "int", - "name": "GIT_STATUS_OPT_UPDATE_INDEX", - "comments": "", - "value": 8192 + "type": "git_strarray", + "name": "pathspec", + "comments": " The `pathspec` is an array of path patterns to match (using\n fnmatch-style matching), or just an array of paths to match\n exactly if `GIT_STATUS_OPT_DISABLE_PATHSPEC_MATCH` is specified\n in the flags." }, { - "type": "int", - "name": "GIT_STATUS_OPT_INCLUDE_UNREADABLE", - "comments": "", - "value": 16384 + "type": "git_tree *", + "name": "baseline", + "comments": " The `baseline` is the tree to be used for comparison to the\n working directory and index; defaults to HEAD." }, { - "type": "int", - "name": "GIT_STATUS_OPT_INCLUDE_UNREADABLE_AS_UNTRACKED", - "comments": "", - "value": 32768 + "type": "uint16_t", + "name": "rename_threshold", + "comments": " Threshold above which similar files will be considered renames.\n This is equivalent to the -M option. Defaults to 50." 
} ], "used": { "returns": [], - "needs": [] + "needs": [ + "git_status_foreach_ext", + "git_status_list_new", + "git_status_options_init" + ] } } ], @@ -36330,37 +35694,34 @@ "GIT_STATUS_SHOW_WORKDIR_ONLY" ], "type": "enum", - "file": "status.h", - "line": 79, - "lineto": 83, + "file": "git2/status.h", + "line": 72, + "lineto": 90, "block": "GIT_STATUS_SHOW_INDEX_AND_WORKDIR\nGIT_STATUS_SHOW_INDEX_ONLY\nGIT_STATUS_SHOW_WORKDIR_ONLY", "tdef": "typedef", "description": " Select the files on which to report status.", - "comments": "

With git_status_foreach_ext, this will control which changes get callbacks. With git_status_list_new, these will control which changes are included in the list.

\n\n\n", + "comments": "

With git_status_foreach_ext, this will control which changes get callbacks. With git_status_list_new, these will control which changes are included in the list.

\n", "fields": [ { "type": "int", "name": "GIT_STATUS_SHOW_INDEX_AND_WORKDIR", - "comments": "", + "comments": "

The default. This roughly matches git status --porcelain regarding\n which files are included and in what order.

\n", "value": 0 }, { "type": "int", "name": "GIT_STATUS_SHOW_INDEX_ONLY", - "comments": "", + "comments": "

Only gives status based on HEAD to index comparison, not looking at\n working directory changes.

\n", "value": 1 }, { "type": "int", "name": "GIT_STATUS_SHOW_WORKDIR_ONLY", - "comments": "", + "comments": "

Only gives status based on index to working directory comparison,\n not comparing the index to the HEAD.

\n", "value": 2 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -36383,9 +35744,9 @@ "GIT_STATUS_CONFLICTED" ], "type": "enum", - "file": "status.h", - "line": 32, - "lineto": 50, + "file": "git2/status.h", + "line": 34, + "lineto": 52, "block": "GIT_STATUS_CURRENT\nGIT_STATUS_INDEX_NEW\nGIT_STATUS_INDEX_MODIFIED\nGIT_STATUS_INDEX_DELETED\nGIT_STATUS_INDEX_RENAMED\nGIT_STATUS_INDEX_TYPECHANGE\nGIT_STATUS_WT_NEW\nGIT_STATUS_WT_MODIFIED\nGIT_STATUS_WT_DELETED\nGIT_STATUS_WT_TYPECHANGE\nGIT_STATUS_WT_RENAMED\nGIT_STATUS_WT_UNREADABLE\nGIT_STATUS_IGNORED\nGIT_STATUS_CONFLICTED", "tdef": "typedef", "description": " Status flags for a single file.", @@ -36476,22 +35837,16 @@ "value": 32768 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_strarray", { - "decl": [ - "char ** strings", - "size_t count" - ], + "decl": ["char ** strings", "size_t count"], "type": "struct", "value": "git_strarray", - "file": "strarray.h", + "file": "git2/strarray.h", "line": 22, "lineto": 25, "block": "char ** strings\nsize_t count", @@ -36499,16 +35854,8 @@ "description": " Array of strings ", "comments": "", "fields": [ - { - "type": "char **", - "name": "strings", - "comments": "" - }, - { - "type": "size_t", - "name": "count", - "comments": "" - } + { "type": "char **", "name": "strings", "comments": "" }, + { "type": "size_t", "name": "count", "comments": "" } ], "used": { "returns": [], @@ -36529,6 +35876,7 @@ "git_remote_upload", "git_reset_default", "git_strarray_copy", + "git_strarray_dispose", "git_strarray_free", "git_tag_list", "git_tag_list_match", @@ -36538,87 +35886,32 @@ } ], [ - "git_stream", + "git_stream_t", { - "decl": [ - "int version", - "int encrypted", - "int proxy_support", - "int (*)(struct git_stream *) connect", - "int (*)(git_cert **, struct git_stream *) certificate", - "int (*)(struct git_stream *, const git_proxy_options *) set_proxy", - "ssize_t 
(*)(struct git_stream *, void *, size_t) read", - "ssize_t (*)(struct git_stream *, const char *, size_t, int) write", - "int (*)(struct git_stream *) close", - "void (*)(struct git_stream *) free" - ], - "type": "struct", - "value": "git_stream", - "file": "sys/stream.h", - "line": 29, - "lineto": 41, - "block": "int version\nint encrypted\nint proxy_support\nint (*)(struct git_stream *) connect\nint (*)(git_cert **, struct git_stream *) certificate\nint (*)(struct git_stream *, const git_proxy_options *) set_proxy\nssize_t (*)(struct git_stream *, void *, size_t) read\nssize_t (*)(struct git_stream *, const char *, size_t, int) write\nint (*)(struct git_stream *) close\nvoid (*)(struct git_stream *) free", + "decl": ["GIT_STREAM_STANDARD", "GIT_STREAM_TLS"], + "type": "enum", + "file": "git2/sys/stream.h", + "line": 99, + "lineto": 105, + "block": "GIT_STREAM_STANDARD\nGIT_STREAM_TLS", "tdef": "typedef", - "description": " Every stream must have this struct as its first element, so the\n API can talk to it. You'd define your stream as", - "comments": "
 struct my_stream {             git_stream parent;             ...     }\n
\n\n

and fill the functions

\n", + "description": " The type of stream to register.", + "comments": "", "fields": [ { "type": "int", - "name": "version", - "comments": "" - }, - { - "type": "int", - "name": "encrypted", - "comments": "" + "name": "GIT_STREAM_STANDARD", + "comments": "

A standard (non-TLS) socket.

\n", + "value": 1 }, { "type": "int", - "name": "proxy_support", - "comments": "" - }, - { - "type": "int (*)(struct git_stream *)", - "name": "connect", - "comments": "" - }, - { - "type": "int (*)(git_cert **, struct git_stream *)", - "name": "certificate", - "comments": "" - }, - { - "type": "int (*)(struct git_stream *, const git_proxy_options *)", - "name": "set_proxy", - "comments": "" - }, - { - "type": "ssize_t (*)(struct git_stream *, void *, size_t)", - "name": "read", - "comments": "" - }, - { - "type": "ssize_t (*)(struct git_stream *, const char *, size_t, int)", - "name": "write", - "comments": "" - }, - { - "type": "int (*)(struct git_stream *)", - "name": "close", - "comments": "" - }, - { - "type": "void (*)(struct git_stream *)", - "name": "free", - "comments": "" + "name": "GIT_STREAM_TLS", + "comments": "

A TLS-encrypted socket.

\n", + "value": 2 } ], - "used": { - "returns": [], - "needs": [ - "git_stream_register_tls" - ] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -36627,9 +35920,9 @@ "decl": "git_submodule", "type": "struct", "value": "git_submodule", - "file": "types.h", - "line": 339, - "lineto": 339, + "file": "git2/types.h", + "line": 283, + "lineto": 283, "tdef": "typedef", "description": " Opaque structure representing a submodule.", "comments": "", @@ -36645,6 +35938,8 @@ "git_submodule_add_to_index", "git_submodule_branch", "git_submodule_cb", + "git_submodule_clone", + "git_submodule_dup", "git_submodule_fetch_recurse_submodules", "git_submodule_foreach", "git_submodule_free", @@ -36666,7 +35961,7 @@ "git_submodule_status", "git_submodule_sync", "git_submodule_update", - "git_submodule_update_init_options", + "git_submodule_update_options_init", "git_submodule_update_strategy", "git_submodule_url", "git_submodule_wd_id" @@ -36685,9 +35980,9 @@ "GIT_SUBMODULE_IGNORE_ALL" ], "type": "enum", - "file": "types.h", - "line": 403, - "lineto": 410, + "file": "git2/types.h", + "line": 347, + "lineto": 354, "block": "GIT_SUBMODULE_IGNORE_UNSPECIFIED\nGIT_SUBMODULE_IGNORE_NONE\nGIT_SUBMODULE_IGNORE_UNTRACKED\nGIT_SUBMODULE_IGNORE_DIRTY\nGIT_SUBMODULE_IGNORE_ALL", "tdef": "typedef", "description": " Submodule ignore values", @@ -36725,13 +36020,8 @@ } ], "used": { - "returns": [ - "git_submodule_ignore" - ], - "needs": [ - "git_submodule_set_ignore", - "git_submodule_status" - ] + "returns": ["git_submodule_ignore"], + "needs": ["git_submodule_set_ignore", "git_submodule_status"] } } ], @@ -36744,9 +36034,9 @@ "GIT_SUBMODULE_RECURSE_ONDEMAND" ], "type": "enum", - "file": "types.h", - "line": 422, - "lineto": 426, + "file": "git2/types.h", + "line": 366, + "lineto": 370, "block": "GIT_SUBMODULE_RECURSE_NO\nGIT_SUBMODULE_RECURSE_YES\nGIT_SUBMODULE_RECURSE_ONDEMAND", "tdef": "typedef", "description": " Options for submodule recurse.", @@ -36772,12 +36062,8 @@ } ], "used": { - 
"returns": [ - "git_submodule_fetch_recurse_submodules" - ], - "needs": [ - "git_submodule_set_fetch_recurse_submodules" - ] + "returns": ["git_submodule_fetch_recurse_submodules"], + "needs": ["git_submodule_set_fetch_recurse_submodules"] } } ], @@ -36801,7 +36087,7 @@ "GIT_SUBMODULE_STATUS_WD_UNTRACKED" ], "type": "enum", - "file": "submodule.h", + "file": "git2/submodule.h", "line": 74, "lineto": 89, "block": "GIT_SUBMODULE_STATUS_IN_HEAD\nGIT_SUBMODULE_STATUS_IN_INDEX\nGIT_SUBMODULE_STATUS_IN_CONFIG\nGIT_SUBMODULE_STATUS_IN_WD\nGIT_SUBMODULE_STATUS_INDEX_ADDED\nGIT_SUBMODULE_STATUS_INDEX_DELETED\nGIT_SUBMODULE_STATUS_INDEX_MODIFIED\nGIT_SUBMODULE_STATUS_WD_UNINITIALIZED\nGIT_SUBMODULE_STATUS_WD_ADDED\nGIT_SUBMODULE_STATUS_WD_DELETED\nGIT_SUBMODULE_STATUS_WD_MODIFIED\nGIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED\nGIT_SUBMODULE_STATUS_WD_WD_MODIFIED\nGIT_SUBMODULE_STATUS_WD_UNTRACKED", @@ -36894,10 +36180,7 @@ "value": 8192 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -36911,23 +36194,19 @@ ], "type": "struct", "value": "git_submodule_update_options", - "file": "submodule.h", - "line": 129, - "lineto": 154, + "file": "git2/submodule.h", + "line": 135, + "lineto": 158, "block": "unsigned int version\ngit_checkout_options checkout_opts\ngit_fetch_options fetch_opts\nint allow_fetch", "tdef": "typedef", "description": " Submodule update options structure", - "comments": "

Use the GIT_SUBMODULE_UPDATE_OPTIONS_INIT to get the default settings, like this:

\n\n

git_submodule_update_options opts = GIT_SUBMODULE_UPDATE_OPTIONS_INIT;

\n", + "comments": "

Initialize with GIT_SUBMODULE_UPDATE_OPTIONS_INIT. Alternatively, you can use git_submodule_update_options_init.

\n", "fields": [ - { - "type": "unsigned int", - "name": "version", - "comments": "" - }, + { "type": "unsigned int", "name": "version", "comments": "" }, { "type": "git_checkout_options", "name": "checkout_opts", - "comments": " These options are passed to the checkout step. To disable\n checkout, set the `checkout_strategy` to\n `GIT_CHECKOUT_NONE`. Generally you will want the use\n GIT_CHECKOUT_SAFE to update files in the working\n directory. " + "comments": " These options are passed to the checkout step. To disable\n checkout, set the `checkout_strategy` to `GIT_CHECKOUT_NONE`\n or `GIT_CHECKOUT_DRY_RUN`." }, { "type": "git_fetch_options", @@ -36943,8 +36222,9 @@ "used": { "returns": [], "needs": [ + "git_submodule_clone", "git_submodule_update", - "git_submodule_update_init_options" + "git_submodule_update_options_init" ] } } @@ -36960,9 +36240,9 @@ "GIT_SUBMODULE_UPDATE_DEFAULT" ], "type": "enum", - "file": "types.h", - "line": 367, - "lineto": 374, + "file": "git2/types.h", + "line": 311, + "lineto": 318, "block": "GIT_SUBMODULE_UPDATE_CHECKOUT\nGIT_SUBMODULE_UPDATE_REBASE\nGIT_SUBMODULE_UPDATE_MERGE\nGIT_SUBMODULE_UPDATE_NONE\nGIT_SUBMODULE_UPDATE_DEFAULT", "tdef": "typedef", "description": " Submodule update values", @@ -37000,12 +36280,8 @@ } ], "used": { - "returns": [ - "git_submodule_update_strategy" - ], - "needs": [ - "git_submodule_set_update" - ] + "returns": ["git_submodule_update_strategy"], + "needs": ["git_submodule_set_update"] } } ], @@ -37015,9 +36291,9 @@ "decl": "git_tag", "type": "struct", "value": "git_tag", - "file": "types.h", - "line": 117, - "lineto": 117, + "file": "git2/types.h", + "line": 135, + "lineto": 135, "tdef": "typedef", "description": " Parsed representation of a tag object. 
", "comments": "", @@ -37045,16 +36321,12 @@ [ "git_time", { - "decl": [ - "git_time_t time", - "int offset", - "char sign" - ], + "decl": ["git_time_t time", "int offset", "char sign"], "type": "struct", "value": "git_time", - "file": "types.h", - "line": 159, - "lineto": 163, + "file": "git2/types.h", + "line": 180, + "lineto": 184, "block": "git_time_t time\nint offset\nchar sign", "tdef": "typedef", "description": " Time in a signature ", @@ -37077,12 +36349,8 @@ } ], "used": { - "returns": [ - "git_commit_time" - ], - "needs": [ - "git_signature_new" - ] + "returns": ["git_commit_time"], + "needs": ["git_signature_new"] } } ], @@ -37099,7 +36367,7 @@ "GIT_TRACE_TRACE" ], "type": "enum", - "file": "trace.h", + "file": "git2/trace.h", "line": 26, "lineto": 47, "block": "GIT_TRACE_NONE\nGIT_TRACE_FATAL\nGIT_TRACE_ERROR\nGIT_TRACE_WARN\nGIT_TRACE_INFO\nGIT_TRACE_DEBUG\nGIT_TRACE_TRACE", @@ -37150,13 +36418,7 @@ "value": 6 } ], - "used": { - "returns": [], - "needs": [ - "git_trace_callback", - "git_trace_set" - ] - } + "used": { "returns": [], "needs": ["git_trace_cb", "git_trace_set"] } } ], [ @@ -37164,164 +36426,53 @@ { "decl": "git_transaction", "type": "struct", - "value": "git_transaction", - "file": "types.h", - "line": 179, - "lineto": 179, - "tdef": "typedef", - "description": " Transactional interface to references ", - "comments": "", - "used": { - "returns": [], - "needs": [ - "git_config_lock" - ] - } - } - ], - [ - "git_transfer_progress", - { - "decl": [ - "unsigned int total_objects", - "unsigned int indexed_objects", - "unsigned int received_objects", - "unsigned int local_objects", - "unsigned int total_deltas", - "unsigned int indexed_deltas", - "size_t received_bytes" - ], - "type": "struct", - "value": "git_transfer_progress", - "file": "types.h", - "line": 257, - "lineto": 265, - "block": "unsigned int total_objects\nunsigned int indexed_objects\nunsigned int received_objects\nunsigned int local_objects\nunsigned int total_deltas\nunsigned 
int indexed_deltas\nsize_t received_bytes", - "tdef": "typedef", - "description": " This is passed as the first argument to the callback to allow the\n user to see the progress.", - "comments": "\n", - "fields": [ - { - "type": "unsigned int", - "name": "total_objects", - "comments": "" - }, - { - "type": "unsigned int", - "name": "indexed_objects", - "comments": "" - }, - { - "type": "unsigned int", - "name": "received_objects", - "comments": "" - }, - { - "type": "unsigned int", - "name": "local_objects", - "comments": "" - }, - { - "type": "unsigned int", - "name": "total_deltas", - "comments": "" - }, - { - "type": "unsigned int", - "name": "indexed_deltas", - "comments": "" - }, - { - "type": "size_t", - "name": "received_bytes", - "comments": "" - } - ], - "used": { - "returns": [ - "git_remote_stats" - ], - "needs": [ - "git_indexer_append", - "git_indexer_commit", - "git_indexer_new", - "git_odb_write_pack", - "git_packbuilder_write", - "git_transfer_progress_cb" - ] - } - } - ], - [ - "git_transport", - { - "decl": "git_transport", - "type": "struct", - "value": "git_transport", - "file": "types.h", - "line": 234, - "lineto": 234, - "tdef": "typedef", - "description": " Interface which represents a transport to communicate with a\n remote.", - "comments": "", - "used": { - "returns": [], - "needs": [ - "git_smart_subtransport_git", - "git_smart_subtransport_http", - "git_smart_subtransport_ssh", - "git_transport_cb", - "git_transport_dummy", - "git_transport_init", - "git_transport_local", - "git_transport_new", - "git_transport_register", - "git_transport_smart", - "git_transport_smart_certificate_check", - "git_transport_smart_credentials", - "git_transport_smart_proxy_options", - "git_transport_ssh_with_paths" - ] - } - } - ], - [ - "git_transport_flags_t", - { - "decl": [ - "GIT_TRANSPORTFLAGS_NONE" - ], - "type": "enum", - "file": "sys/transport.h", - "line": 31, - "lineto": 33, - "block": "GIT_TRANSPORTFLAGS_NONE", - "tdef": "typedef", - 
"description": " Flags to pass to transport", - "comments": "

Currently unused.

\n", - "fields": [ - { - "type": "int", - "name": "GIT_TRANSPORTFLAGS_NONE", - "comments": "", - "value": 0 - } - ], + "value": "git_transaction", + "file": "git2/types.h", + "line": 200, + "lineto": 200, + "tdef": "typedef", + "description": " Transactional interface to references ", + "comments": "", "used": { "returns": [], - "needs": [] + "needs": [ + "git_config_lock", + "git_transaction_commit", + "git_transaction_free", + "git_transaction_lock_ref", + "git_transaction_new", + "git_transaction_remove", + "git_transaction_set_reflog", + "git_transaction_set_symbolic_target", + "git_transaction_set_target" + ] } } ], + [ + "git_transport", + { + "decl": "git_transport", + "type": "struct", + "value": "git_transport", + "file": "git2/types.h", + "line": 263, + "lineto": 263, + "tdef": "typedef", + "description": " Interface which represents a transport to communicate with a\n remote.", + "comments": "", + "used": { "returns": [], "needs": ["git_transport_cb"] } + } + ], [ "git_tree", { "decl": "git_tree", "type": "struct", "value": "git_tree", - "file": "types.h", - "line": 129, - "lineto": 129, + "file": "git2/types.h", + "line": 147, + "lineto": 147, "tdef": "typedef", "description": " Representation of a tree object. ", "comments": "", @@ -37333,9 +36484,11 @@ "git_treebuilder_get" ], "needs": [ + "git_apply_to_tree", "git_commit_amend", "git_commit_create", "git_commit_create_buffer", + "git_commit_create_cb", "git_commit_create_v", "git_commit_tree", "git_diff_tree_to_index", @@ -37389,9 +36542,9 @@ "decl": "git_tree_entry", "type": "struct", "value": "git_tree_entry", - "file": "types.h", - "line": 126, - "lineto": 126, + "file": "git2/types.h", + "line": 144, + "lineto": 144, "tdef": "typedef", "description": " Representation of each one of the entries in a tree object. 
", "comments": "", @@ -37431,9 +36584,9 @@ ], "type": "struct", "value": "git_tree_update", - "file": "tree.h", - "line": 448, - "lineto": 457, + "file": "git2/tree.h", + "line": 449, + "lineto": 458, "block": "git_tree_update_t action\ngit_oid id\ngit_filemode_t filemode\nconst char * path", "tdef": "typedef", "description": " An action to perform during the update of a tree", @@ -37444,11 +36597,7 @@ "name": "action", "comments": " Update action. If it's an removal, only the path is looked at " }, - { - "type": "git_oid", - "name": "id", - "comments": " The entry's id " - }, + { "type": "git_oid", "name": "id", "comments": " The entry's id " }, { "type": "git_filemode_t", "name": "filemode", @@ -37460,25 +36609,17 @@ "comments": " The full path from the root tree " } ], - "used": { - "returns": [], - "needs": [ - "git_tree_create_updated" - ] - } + "used": { "returns": [], "needs": ["git_tree_create_updated"] } } ], [ "git_tree_update_t", { - "decl": [ - "GIT_TREE_UPDATE_UPSERT", - "GIT_TREE_UPDATE_REMOVE" - ], + "decl": ["GIT_TREE_UPDATE_UPSERT", "GIT_TREE_UPDATE_REMOVE"], "type": "enum", - "file": "tree.h", - "line": 438, - "lineto": 443, + "file": "git2/tree.h", + "line": 439, + "lineto": 444, "block": "GIT_TREE_UPDATE_UPSERT\nGIT_TREE_UPDATE_REMOVE", "tdef": "typedef", "description": " The kind of update to perform", @@ -37497,10 +36638,7 @@ "value": 1 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ @@ -37509,9 +36647,9 @@ "decl": "git_treebuilder", "type": "struct", "value": "git_treebuilder", - "file": "types.h", - "line": 132, - "lineto": 132, + "file": "git2/types.h", + "line": 150, + "lineto": 150, "tdef": "typedef", "description": " Constructor for in-memory trees ", "comments": "", @@ -37535,12 +36673,9 @@ [ "git_treewalk_mode", { - "decl": [ - "GIT_TREEWALK_PRE", - "GIT_TREEWALK_POST" - ], + "decl": ["GIT_TREEWALK_PRE", "GIT_TREEWALK_POST"], "type": "enum", - "file": "tree.h", + "file": 
"git2/tree.h", "line": 398, "lineto": 401, "block": "GIT_TREEWALK_PRE\nGIT_TREEWALK_POST", @@ -37561,12 +36696,7 @@ "value": 1 } ], - "used": { - "returns": [], - "needs": [ - "git_tree_walk" - ] - } + "used": { "returns": [], "needs": ["git_tree_walk"] } } ], [ @@ -37575,9 +36705,9 @@ "decl": "git_worktree", "type": "struct", "value": "git_worktree", - "file": "types.h", - "line": 108, - "lineto": 108, + "file": "git2/types.h", + "line": 126, + "lineto": 126, "tdef": "typedef", "description": " Representation of a working tree ", "comments": "", @@ -37586,21 +36716,102 @@ "needs": [ "git_repository_open_from_worktree", "git_worktree_add", - "git_worktree_add_init_options", + "git_worktree_add_options_init", "git_worktree_free", "git_worktree_is_locked", "git_worktree_is_prunable", "git_worktree_lock", "git_worktree_lookup", + "git_worktree_name", "git_worktree_open_from_repository", + "git_worktree_path", "git_worktree_prune", - "git_worktree_prune_init_options", + "git_worktree_prune_options_init", "git_worktree_unlock", "git_worktree_validate" ] } } ], + [ + "git_worktree_add_options", + { + "decl": [ + "unsigned int version", + "int lock", + "int checkout_existing", + "git_reference * ref", + "git_checkout_options checkout_options" + ], + "type": "struct", + "value": "git_worktree_add_options", + "file": "git2/worktree.h", + "line": 86, + "lineto": 97, + "block": "unsigned int version\nint lock\nint checkout_existing\ngit_reference * ref\ngit_checkout_options checkout_options", + "tdef": "typedef", + "description": " Worktree add options structure", + "comments": "

Initialize with GIT_WORKTREE_ADD_OPTIONS_INIT. Alternatively, you can use git_worktree_add_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "int", + "name": "lock", + "comments": " lock newly created worktree " + }, + { + "type": "int", + "name": "checkout_existing", + "comments": " allow checkout of existing branch matching worktree name " + }, + { + "type": "git_reference *", + "name": "ref", + "comments": " reference to use for the new worktree HEAD " + }, + { + "type": "git_checkout_options", + "name": "checkout_options", + "comments": " Options for the checkout." + } + ], + "used": { + "returns": [], + "needs": ["git_worktree_add", "git_worktree_add_options_init"] + } + } + ], + [ + "git_worktree_prune_options", + { + "decl": ["unsigned int version", "uint32_t flags"], + "type": "struct", + "value": "git_worktree_prune_options", + "file": "git2/worktree.h", + "line": 210, + "lineto": 215, + "block": "unsigned int version\nuint32_t flags", + "tdef": "typedef", + "description": " Worktree prune options structure", + "comments": "

Initialize with GIT_WORKTREE_PRUNE_OPTIONS_INIT. Alternatively, you can use git_worktree_prune_options_init.

\n", + "fields": [ + { "type": "unsigned int", "name": "version", "comments": "" }, + { + "type": "uint32_t", + "name": "flags", + "comments": " A combination of `git_worktree_prune_t` " + } + ], + "used": { + "returns": [], + "needs": [ + "git_worktree_is_prunable", + "git_worktree_prune", + "git_worktree_prune_options_init" + ] + } + } + ], [ "git_worktree_prune_t", { @@ -37610,9 +36821,9 @@ "GIT_WORKTREE_PRUNE_WORKING_TREE" ], "type": "enum", - "file": "worktree.h", - "line": 155, - "lineto": 162, + "file": "git2/worktree.h", + "line": 194, + "lineto": 201, "block": "GIT_WORKTREE_PRUNE_VALID\nGIT_WORKTREE_PRUNE_LOCKED\nGIT_WORKTREE_PRUNE_WORKING_TREE", "tdef": "typedef", "description": " Flags which can be passed to git_worktree_prune to alter its\n behavior.", @@ -37637,30 +36848,50 @@ "value": 4 } ], - "used": { - "returns": [], - "needs": [] - } + "used": { "returns": [], "needs": [] } } ], [ "git_writestream", { - "decl": "git_writestream", + "decl": [ + "int (*)(git_writestream *, const char *, size_t) write", + "int (*)(git_writestream *) close", + "void (*)(git_writestream *) free" + ], "type": "struct", "value": "git_writestream", - "file": "types.h", - "line": 429, - "lineto": 429, - "tdef": "typedef", + "file": "git2/types.h", + "line": 375, + "lineto": 379, + "tdef": null, "description": " A type to write in a streaming fashion, for example, for filters. 
", "comments": "", + "fields": [ + { + "type": "int (*)(git_writestream *, const char *, size_t)", + "name": "write", + "comments": "" + }, + { + "type": "int (*)(git_writestream *)", + "name": "close", + "comments": "" + }, + { + "type": "void (*)(git_writestream *)", + "name": "free", + "comments": "" + } + ], + "block": "int (*)(git_writestream *, const char *, size_t) write\nint (*)(git_writestream *) close\nvoid (*)(git_writestream *) free", "used": { "returns": [], "needs": [ - "git_blob_create_fromstream", - "git_blob_create_fromstream_commit", + "git_blob_create_from_stream", + "git_blob_create_from_stream_commit", "git_filter_list_stream_blob", + "git_filter_list_stream_buffer", "git_filter_list_stream_data", "git_filter_list_stream_file" ] @@ -37668,7 +36899,7 @@ } ] ], - "prefix": "include/git2", + "prefix": "include", "groups": [ [ "annotated", @@ -37678,17 +36909,22 @@ "git_annotated_commit_from_ref", "git_annotated_commit_from_revspec", "git_annotated_commit_id", - "git_annotated_commit_lookup" + "git_annotated_commit_lookup", + "git_annotated_commit_ref" ] ], + ["apply", ["git_apply", "git_apply_options_init", "git_apply_to_tree"]], [ "attr", [ "git_attr_add_macro", "git_attr_cache_flush", "git_attr_foreach", + "git_attr_foreach_ext", "git_attr_get", + "git_attr_get_ext", "git_attr_get_many", + "git_attr_get_many_ext", "git_attr_value" ] ], @@ -37696,23 +36932,31 @@ "blame", [ "git_blame_buffer", - "git_blame_file", "git_blame_free", "git_blame_get_hunk_byindex", "git_blame_get_hunk_byline", "git_blame_get_hunk_count", - "git_blame_init_options" + "git_blame_hunk_byindex", + "git_blame_hunk_byline", + "git_blame_hunkcount", + "git_blame_init_options", + "git_blame_line_byindex", + "git_blame_linecount", + "git_blame_options_init" ] ], [ "blob", [ - "git_blob_create_frombuffer", - "git_blob_create_fromdisk", - "git_blob_create_fromstream", - "git_blob_create_fromstream_commit", - "git_blob_create_fromworkdir", + "git_blob_create_from_buffer", + 
"git_blob_create_from_disk", + "git_blob_create_from_stream", + "git_blob_create_from_stream_commit", + "git_blob_create_from_workdir", + "git_blob_data_is_binary", "git_blob_dup", + "git_blob_filter", + "git_blob_filter_options_init", "git_blob_filtered_content", "git_blob_free", "git_blob_id", @@ -37737,15 +36981,21 @@ "git_branch_lookup", "git_branch_move", "git_branch_name", + "git_branch_name_is_valid", "git_branch_next", + "git_branch_remote_name", "git_branch_set_upstream", - "git_branch_upstream" + "git_branch_upstream", + "git_branch_upstream_merge", + "git_branch_upstream_name", + "git_branch_upstream_remote" ] ], [ "buf", [ "git_buf_contains_nul", + "git_buf_dispose", "git_buf_free", "git_buf_grow", "git_buf_is_binary", @@ -37757,36 +37007,27 @@ [ "git_checkout_head", "git_checkout_index", - "git_checkout_init_options", + "git_checkout_options_init", "git_checkout_tree" ] ], [ "cherrypick", - [ - "git_cherrypick", - "git_cherrypick_commit", - "git_cherrypick_init_options" - ] - ], - [ - "clone", - [ - "git_clone", - "git_clone_init_options" - ] + ["git_cherrypick", "git_cherrypick_commit", "git_cherrypick_options_init"] ], + ["clone", ["git_clone", "git_clone_options_init"]], [ "commit", [ "git_commit_amend", "git_commit_author", + "git_commit_author_with_mailmap", "git_commit_body", "git_commit_committer", + "git_commit_committer_with_mailmap", "git_commit_create", "git_commit_create_buffer", - "git_commit_create_from_callback", - "git_commit_create_from_ids", + "git_commit_create_from_stage", "git_commit_create_v", "git_commit_create_with_signature", "git_commit_dup", @@ -37812,10 +37053,10 @@ "git_commit_tree_id" ] ], + ["commitarray", ["git_commitarray_dispose"]], [ "config", [ - "git_config_add_backend", "git_config_add_file_ondisk", "git_config_backend_foreach_match", "git_config_delete_entry", @@ -37837,7 +37078,6 @@ "git_config_get_path", "git_config_get_string", "git_config_get_string_buf", - "git_config_init_backend", 
"git_config_iterator_free", "git_config_iterator_glob_new", "git_config_iterator_new", @@ -37859,23 +37099,25 @@ "git_config_set_int64", "git_config_set_multivar", "git_config_set_string", + "git_config_set_writeorder", "git_config_snapshot" ] ], [ - "cred", + "credential", [ - "git_cred_default_new", - "git_cred_free", - "git_cred_has_username", - "git_cred_ssh_custom_new", - "git_cred_ssh_interactive_new", - "git_cred_ssh_key_from_agent", - "git_cred_ssh_key_memory_new", - "git_cred_ssh_key_new", - "git_cred_username_new", - "git_cred_userpass", - "git_cred_userpass_plaintext_new" + "git_credential_default_new", + "git_credential_free", + "git_credential_get_username", + "git_credential_has_username", + "git_credential_ssh_custom_new", + "git_credential_ssh_interactive_new", + "git_credential_ssh_key_from_agent", + "git_credential_ssh_key_memory_new", + "git_credential_ssh_key_new", + "git_credential_username_new", + "git_credential_userpass", + "git_credential_userpass_plaintext_new" ] ], [ @@ -37883,6 +37125,8 @@ [ "git_describe_commit", "git_describe_format", + "git_describe_format_options_init", + "git_describe_options_init", "git_describe_result_free", "git_describe_workdir" ] @@ -37894,28 +37138,25 @@ "git_diff_blobs", "git_diff_buffers", "git_diff_commit_as_email", - "git_diff_find_init_options", + "git_diff_find_options_init", "git_diff_find_similar", "git_diff_foreach", "git_diff_format_email", - "git_diff_format_email_init_options", + "git_diff_format_email_options_init", "git_diff_free", "git_diff_from_buffer", "git_diff_get_delta", - "git_diff_get_perfdata", "git_diff_get_stats", "git_diff_index_to_index", "git_diff_index_to_workdir", - "git_diff_init_options", "git_diff_is_sorted_icase", "git_diff_merge", "git_diff_num_deltas", "git_diff_num_deltas_of_type", + "git_diff_options_init", "git_diff_patchid", - "git_diff_patchid_init_options", + "git_diff_patchid_options_init", "git_diff_print", - "git_diff_print_callback__to_buf", - 
"git_diff_print_callback__to_file_handle", "git_diff_stats_deletions", "git_diff_stats_files_changed", "git_diff_stats_free", @@ -37929,62 +37170,36 @@ "git_diff_tree_to_workdir_with_index" ] ], - [ - "fetch", - [ - "git_fetch_init_options" - ] - ], + ["email", ["git_email_create_from_commit"]], + ["error", ["git_error_last"]], + ["fetch", ["git_fetch_options_init"]], [ "filter", [ - "git_filter_init", "git_filter_list_apply_to_blob", + "git_filter_list_apply_to_buffer", "git_filter_list_apply_to_data", "git_filter_list_apply_to_file", "git_filter_list_contains", "git_filter_list_free", - "git_filter_list_length", "git_filter_list_load", - "git_filter_list_new", - "git_filter_list_push", + "git_filter_list_load_ext", "git_filter_list_stream_blob", + "git_filter_list_stream_buffer", "git_filter_list_stream_data", - "git_filter_list_stream_file", - "git_filter_lookup", - "git_filter_register", - "git_filter_source_filemode", - "git_filter_source_flags", - "git_filter_source_id", - "git_filter_source_mode", - "git_filter_source_path", - "git_filter_source_repo", - "git_filter_unregister" + "git_filter_list_stream_file" ] ], [ "giterr", - [ - "giterr_clear", - "giterr_last", - "giterr_set_oom", - "giterr_set_str" - ] + ["giterr_clear", "giterr_last", "giterr_set_oom", "giterr_set_str"] ], [ "graph", [ "git_graph_ahead_behind", - "git_graph_descendant_of" - ] - ], - [ - "hashsig", - [ - "git_hashsig_compare", - "git_hashsig_create", - "git_hashsig_create_fromfile", - "git_hashsig_free" + "git_graph_descendant_of", + "git_graph_reachable_from_any" ] ], [ @@ -38001,7 +37216,7 @@ "git_index_add", "git_index_add_all", "git_index_add_bypath", - "git_index_add_frombuffer", + "git_index_add_from_buffer", "git_index_caps", "git_index_checksum", "git_index_clear", @@ -38021,6 +37236,9 @@ "git_index_get_byindex", "git_index_get_bypath", "git_index_has_conflicts", + "git_index_iterator_free", + "git_index_iterator_new", + "git_index_iterator_next", "git_index_new", 
"git_index_open", "git_index_owner", @@ -38047,25 +37265,33 @@ "git_indexer_commit", "git_indexer_free", "git_indexer_hash", - "git_indexer_new" + "git_indexer_name", + "git_indexer_new", + "git_indexer_options_init" ] ], [ "libgit2", [ + "git_libgit2_feature_backend", "git_libgit2_features", "git_libgit2_init", "git_libgit2_opts", + "git_libgit2_prerelease", "git_libgit2_shutdown", "git_libgit2_version" ] ], [ - "mempack", + "mailmap", [ - "git_mempack_dump", - "git_mempack_new", - "git_mempack_reset" + "git_mailmap_add_entry", + "git_mailmap_free", + "git_mailmap_from_buffer", + "git_mailmap_from_repository", + "git_mailmap_new", + "git_mailmap_resolve", + "git_mailmap_resolve_signature" ] ], [ @@ -38073,6 +37299,7 @@ [ "git_merge", "git_merge_analysis", + "git_merge_analysis_for_ref", "git_merge_base", "git_merge_base_many", "git_merge_base_octopus", @@ -38081,10 +37308,10 @@ "git_merge_commits", "git_merge_file", "git_merge_file_from_index", - "git_merge_file_init_input", - "git_merge_file_init_options", + "git_merge_file_input_init", + "git_merge_file_options_init", "git_merge_file_result_free", - "git_merge_init_options", + "git_merge_options_init", "git_merge_trees" ] ], @@ -38096,27 +37323,7 @@ "git_message_trailers" ] ], - [ - "note", - [ - "git_note_author", - "git_note_commit_create", - "git_note_commit_iterator_new", - "git_note_commit_read", - "git_note_commit_remove", - "git_note_committer", - "git_note_create", - "git_note_foreach", - "git_note_free", - "git_note_id", - "git_note_iterator_free", - "git_note_iterator_new", - "git_note_message", - "git_note_next", - "git_note_read", - "git_note_remove" - ] - ], + ["note", ["git_note_iterator_free", "git_note_next"]], [ "object", [ @@ -38129,6 +37336,7 @@ "git_object_lookup_prefix", "git_object_owner", "git_object_peel", + "git_object_rawcontent_is_valid", "git_object_short_id", "git_object_string2type", "git_object_type", @@ -38146,6 +37354,7 @@ "git_odb_backend_one_pack", "git_odb_backend_pack", 
"git_odb_exists", + "git_odb_exists_ext", "git_odb_exists_prefix", "git_odb_expand_ids", "git_odb_foreach", @@ -38153,7 +37362,6 @@ "git_odb_get_backend", "git_odb_hash", "git_odb_hashfile", - "git_odb_init_backend", "git_odb_new", "git_odb_num_backends", "git_odb_object_data", @@ -38169,11 +37377,13 @@ "git_odb_read_header", "git_odb_read_prefix", "git_odb_refresh", + "git_odb_set_commit_graph", "git_odb_stream_finalize_write", "git_odb_stream_free", "git_odb_stream_read", "git_odb_stream_write", "git_odb_write", + "git_odb_write_multi_pack_index", "git_odb_write_pack" ] ], @@ -38188,7 +37398,7 @@ "git_oid_fromstr", "git_oid_fromstrn", "git_oid_fromstrp", - "git_oid_iszero", + "git_oid_is_zero", "git_oid_ncmp", "git_oid_nfmt", "git_oid_pathfmt", @@ -38201,18 +37411,7 @@ "git_oid_tostr_s" ] ], - [ - "oidarray", - [ - "git_oidarray_free" - ] - ], - [ - "openssl", - [ - "git_openssl_set_locking" - ] - ], + ["oidarray", ["git_oidarray_dispose", "git_oidarray_free"]], [ "packbuilder", [ @@ -38224,11 +37423,13 @@ "git_packbuilder_insert_recur", "git_packbuilder_insert_tree", "git_packbuilder_insert_walk", + "git_packbuilder_name", "git_packbuilder_new", "git_packbuilder_object_count", "git_packbuilder_set_callbacks", "git_packbuilder_set_threads", "git_packbuilder_write", + "git_packbuilder_write_buf", "git_packbuilder_written" ] ], @@ -38246,6 +37447,7 @@ "git_patch_line_stats", "git_patch_num_hunks", "git_patch_num_lines_in_hunk", + "git_patch_owner", "git_patch_print", "git_patch_size", "git_patch_to_buf" @@ -38269,18 +37471,8 @@ "git_pathspec_new" ] ], - [ - "proxy", - [ - "git_proxy_init_options" - ] - ], - [ - "push", - [ - "git_push_init_options" - ] - ], + ["proxy", ["git_proxy_options_init"]], + ["push", ["git_push_options_init"]], [ "rebase", [ @@ -38289,32 +37481,31 @@ "git_rebase_finish", "git_rebase_free", "git_rebase_init", - "git_rebase_init_options", "git_rebase_inmemory_index", "git_rebase_next", + "git_rebase_onto_id", + "git_rebase_onto_name", 
"git_rebase_open", "git_rebase_operation_byindex", "git_rebase_operation_current", - "git_rebase_operation_entrycount" + "git_rebase_operation_entrycount", + "git_rebase_options_init", + "git_rebase_orig_head_id", + "git_rebase_orig_head_name" ] ], [ "refdb", [ - "git_refdb_backend_fs", "git_refdb_compress", "git_refdb_free", - "git_refdb_init_backend", "git_refdb_new", - "git_refdb_open", - "git_refdb_set_backend" + "git_refdb_open" ] ], [ "reference", [ - "git_reference__alloc", - "git_reference__alloc_symbolic", "git_reference_cmp", "git_reference_create", "git_reference_create_matching", @@ -38338,6 +37529,7 @@ "git_reference_list", "git_reference_lookup", "git_reference_name", + "git_reference_name_is_valid", "git_reference_name_to_id", "git_reference_next", "git_reference_next_name", @@ -38383,9 +37575,12 @@ "git_refspec_dst", "git_refspec_dst_matches", "git_refspec_force", + "git_refspec_free", + "git_refspec_parse", "git_refspec_rtransform", "git_refspec_src", "git_refspec_src_matches", + "git_refspec_src_matches_negative", "git_refspec_string", "git_refspec_transform" ] @@ -38397,11 +37592,15 @@ "git_remote_add_push", "git_remote_autotag", "git_remote_connect", + "git_remote_connect_ext", + "git_remote_connect_options_init", "git_remote_connected", "git_remote_create", "git_remote_create_anonymous", "git_remote_create_detached", + "git_remote_create_options_init", "git_remote_create_with_fetchspec", + "git_remote_create_with_opts", "git_remote_default_branch", "git_remote_delete", "git_remote_disconnect", @@ -38418,6 +37617,7 @@ "git_remote_lookup", "git_remote_ls", "git_remote_name", + "git_remote_name_is_valid", "git_remote_owner", "git_remote_prune", "git_remote_prune_refs", @@ -38426,6 +37626,8 @@ "git_remote_refspec_count", "git_remote_rename", "git_remote_set_autotag", + "git_remote_set_instance_pushurl", + "git_remote_set_instance_url", "git_remote_set_pushurl", "git_remote_set_url", "git_remote_stats", @@ -38438,7 +37640,7 @@ [ "repository", [ - 
"git_repository__cleanup", + "git_repository_commit_parents", "git_repository_commondir", "git_repository_config", "git_repository_config_snapshot", @@ -38450,13 +37652,14 @@ "git_repository_hashfile", "git_repository_head", "git_repository_head_detached", + "git_repository_head_detached_for_worktree", "git_repository_head_for_worktree", "git_repository_head_unborn", "git_repository_ident", "git_repository_index", "git_repository_init", "git_repository_init_ext", - "git_repository_init_init_options", + "git_repository_init_options_init", "git_repository_is_bare", "git_repository_is_empty", "git_repository_is_shallow", @@ -38465,58 +37668,29 @@ "git_repository_mergehead_foreach", "git_repository_message", "git_repository_message_remove", - "git_repository_new", "git_repository_odb", + "git_repository_oid_type", "git_repository_open", "git_repository_open_bare", "git_repository_open_ext", "git_repository_open_from_worktree", "git_repository_path", "git_repository_refdb", - "git_repository_reinit_filesystem", - "git_repository_set_bare", - "git_repository_set_config", "git_repository_set_head", "git_repository_set_head_detached", "git_repository_set_head_detached_from_annotated", "git_repository_set_ident", - "git_repository_set_index", "git_repository_set_namespace", - "git_repository_set_odb", - "git_repository_set_refdb", "git_repository_set_workdir", "git_repository_state", "git_repository_state_cleanup", - "git_repository_submodule_cache_all", - "git_repository_submodule_cache_clear", "git_repository_workdir", "git_repository_wrap_odb" ] ], - [ - "reset", - [ - "git_reset", - "git_reset_default", - "git_reset_from_annotated" - ] - ], - [ - "revert", - [ - "git_revert", - "git_revert_commit", - "git_revert_init_options" - ] - ], - [ - "revparse", - [ - "git_revparse", - "git_revparse_ext", - "git_revparse_single" - ] - ], + ["reset", ["git_reset", "git_reset_default", "git_reset_from_annotated"]], + ["revert", ["git_revert", "git_revert_commit", 
"git_revert_options_init"]], + ["revparse", ["git_revparse", "git_revparse_ext", "git_revparse_single"]], [ "revwalk", [ @@ -38543,6 +37717,7 @@ "signature", [ "git_signature_default", + "git_signature_default_from_env", "git_signature_dup", "git_signature_free", "git_signature_from_buffer", @@ -38550,22 +37725,17 @@ "git_signature_now" ] ], - [ - "smart", - [ - "git_smart_subtransport_git", - "git_smart_subtransport_http", - "git_smart_subtransport_ssh" - ] - ], [ "stash", [ "git_stash_apply", - "git_stash_apply_init_options", + "git_stash_apply_options_init", "git_stash_drop", "git_stash_foreach", - "git_stash_pop" + "git_stash_pop", + "git_stash_save", + "git_stash_save_options_init", + "git_stash_save_with_opts" ] ], [ @@ -38575,26 +37745,16 @@ "git_status_file", "git_status_foreach", "git_status_foreach_ext", - "git_status_init_options", "git_status_list_entrycount", "git_status_list_free", - "git_status_list_get_perfdata", "git_status_list_new", + "git_status_options_init", "git_status_should_ignore" ] ], [ "strarray", - [ - "git_strarray_copy", - "git_strarray_free" - ] - ], - [ - "stream", - [ - "git_stream_register_tls" - ] + ["git_strarray_copy", "git_strarray_dispose", "git_strarray_free"] ], [ "submodule", @@ -38603,6 +37763,8 @@ "git_submodule_add_setup", "git_submodule_add_to_index", "git_submodule_branch", + "git_submodule_clone", + "git_submodule_dup", "git_submodule_fetch_recurse_submodules", "git_submodule_foreach", "git_submodule_free", @@ -38627,7 +37789,7 @@ "git_submodule_status", "git_submodule_sync", "git_submodule_update", - "git_submodule_update_init_options", + "git_submodule_update_options_init", "git_submodule_update_strategy", "git_submodule_url", "git_submodule_wd_id" @@ -38638,7 +37800,7 @@ [ "git_tag_annotation_create", "git_tag_create", - "git_tag_create_frombuffer", + "git_tag_create_from_buffer", "git_tag_create_lightweight", "git_tag_delete", "git_tag_dup", @@ -38651,6 +37813,7 @@ "git_tag_lookup_prefix", "git_tag_message", 
"git_tag_name", + "git_tag_name_is_valid", "git_tag_owner", "git_tag_peel", "git_tag_tagger", @@ -38659,32 +37822,18 @@ "git_tag_target_type" ] ], + ["trace", ["git_trace_set"]], [ - "time", - [ - "git_time_monotonic" - ] - ], - [ - "trace", - [ - "git_trace_set" - ] - ], - [ - "transport", + "transaction", [ - "git_transport_dummy", - "git_transport_init", - "git_transport_local", - "git_transport_new", - "git_transport_register", - "git_transport_smart", - "git_transport_smart_certificate_check", - "git_transport_smart_credentials", - "git_transport_smart_proxy_options", - "git_transport_ssh_with_paths", - "git_transport_unregister" + "git_transaction_commit", + "git_transaction_free", + "git_transaction_lock_ref", + "git_transaction_new", + "git_transaction_remove", + "git_transaction_set_reflog", + "git_transaction_set_symbolic_target", + "git_transaction_set_target" ] ], [ @@ -38733,113 +37882,52 @@ "worktree", [ "git_worktree_add", - "git_worktree_add_init_options", + "git_worktree_add_options_init", "git_worktree_free", "git_worktree_is_locked", "git_worktree_is_prunable", "git_worktree_list", "git_worktree_lock", "git_worktree_lookup", + "git_worktree_name", "git_worktree_open_from_repository", + "git_worktree_path", "git_worktree_prune", - "git_worktree_prune_init_options", + "git_worktree_prune_options_init", "git_worktree_unlock", "git_worktree_validate" ] ] ], "examples": [ - [ - "add.c", - "ex/HEAD/add.html" - ], - [ - "blame.c", - "ex/HEAD/blame.html" - ], - [ - "cat-file.c", - "ex/HEAD/cat-file.html" - ], - [ - "common.c", - "ex/HEAD/common.html" - ], - [ - "describe.c", - "ex/HEAD/describe.html" - ], - [ - "diff.c", - "ex/HEAD/diff.html" - ], - [ - "for-each-ref.c", - "ex/HEAD/for-each-ref.html" - ], - [ - "general.c", - "ex/HEAD/general.html" - ], - [ - "init.c", - "ex/HEAD/init.html" - ], - [ - "log.c", - "ex/HEAD/log.html" - ], - [ - "merge.c", - "ex/HEAD/merge.html" - ], - [ - "network/clone.c", - "ex/HEAD/network/clone.html" - ], - [ - 
"network/common.c", - "ex/HEAD/network/common.html" - ], - [ - "network/fetch.c", - "ex/HEAD/network/fetch.html" - ], - [ - "network/git2.c", - "ex/HEAD/network/git2.html" - ], - [ - "network/index-pack.c", - "ex/HEAD/network/index-pack.html" - ], - [ - "network/ls-remote.c", - "ex/HEAD/network/ls-remote.html" - ], - [ - "remote.c", - "ex/HEAD/remote.html" - ], - [ - "rev-list.c", - "ex/HEAD/rev-list.html" - ], - [ - "rev-parse.c", - "ex/HEAD/rev-parse.html" - ], - [ - "showindex.c", - "ex/HEAD/showindex.html" - ], - [ - "status.c", - "ex/HEAD/status.html" - ], - [ - "tag.c", - "ex/HEAD/tag.html" - ] + ["add.c", "ex/v1.9.1/add.html"], + ["args.c", "ex/v1.9.1/args.html"], + ["blame.c", "ex/v1.9.1/blame.html"], + ["cat-file.c", "ex/v1.9.1/cat-file.html"], + ["checkout.c", "ex/v1.9.1/checkout.html"], + ["clone.c", "ex/v1.9.1/clone.html"], + ["commit.c", "ex/v1.9.1/commit.html"], + ["common.c", "ex/v1.9.1/common.html"], + ["config.c", "ex/v1.9.1/config.html"], + ["describe.c", "ex/v1.9.1/describe.html"], + ["diff.c", "ex/v1.9.1/diff.html"], + ["fetch.c", "ex/v1.9.1/fetch.html"], + ["for-each-ref.c", "ex/v1.9.1/for-each-ref.html"], + ["general.c", "ex/v1.9.1/general.html"], + ["index-pack.c", "ex/v1.9.1/index-pack.html"], + ["init.c", "ex/v1.9.1/init.html"], + ["lg2.c", "ex/v1.9.1/lg2.html"], + ["log.c", "ex/v1.9.1/log.html"], + ["ls-files.c", "ex/v1.9.1/ls-files.html"], + ["ls-remote.c", "ex/v1.9.1/ls-remote.html"], + ["merge.c", "ex/v1.9.1/merge.html"], + ["push.c", "ex/v1.9.1/push.html"], + ["remote.c", "ex/v1.9.1/remote.html"], + ["rev-list.c", "ex/v1.9.1/rev-list.html"], + ["rev-parse.c", "ex/v1.9.1/rev-parse.html"], + ["show-index.c", "ex/v1.9.1/show-index.html"], + ["stash.c", "ex/v1.9.1/stash.html"], + ["status.c", "ex/v1.9.1/status.html"], + ["tag.c", "ex/v1.9.1/tag.html"] ] } diff --git a/generate/input/libgit2-supplement.json b/generate/input/libgit2-supplement.json index 3697a3485e..12ee8945b6 100644 --- a/generate/input/libgit2-supplement.json +++ 
b/generate/input/libgit2-supplement.json @@ -1,7 +1,7 @@ { "types": { - "git_cred_default": { - "decl": "git_cred" + "git_credential_default": { + "decl": "git_credential" }, "git_diff_hunk": { "decl": [ @@ -82,33 +82,176 @@ }, "git_note_iterator": { "decl": "git_iterator" + }, + "git_checkout_options": { + "decl": [ + "unsigned int version", + "unsigned int checkout_strategy", + "int disable_filters", + "unsigned int dir_mode", + "unsigned int file_mode", + "int file_open_flags", + "unsigned int notify_flags", + "git_checkout_notify_cb notify_cb", + "void * notify_payload", + "git_checkout_progress_cb progress_cb", + "void * progress_payload", + "git_strarray paths", + "git_tree * baseline", + "git_index * baseline_index", + "const char * target_directory", + "const char * ancestor_label", + "const char * our_label", + "const char * their_label", + "git_checkout_perfdata_cb perfdata_cb", + "void * perfdata_payload", + "git_strarray disabled_filters" + ], + "fields": [ + { + "type": "unsigned int", + "name": "version", + "comments": " The version " + }, + { + "type": "unsigned int", + "name": "checkout_strategy", + "comments": " default will be a safe checkout " + }, + { + "type": "int", + "name": "disable_filters", + "comments": " don't apply filters like CRLF conversion " + }, + { + "type": "unsigned int", + "name": "dir_mode", + "comments": " default is 0755 " + }, + { + "type": "unsigned int", + "name": "file_mode", + "comments": " default is 0644 or 0755 as dictated by blob " + }, + { + "type": "int", + "name": "file_open_flags", + "comments": " default is O_CREAT | O_TRUNC | O_WRONLY " + }, + { + "type": "unsigned int", + "name": "notify_flags", + "comments": " see `git_checkout_notify_t` above " + }, + { + "type": "git_checkout_notify_cb", + "name": "notify_cb", + "comments": " Optional callback to get notifications on specific file states.\n " + }, + { + "type": "void *", + "name": "notify_payload", + "comments": " Payload passed to notify_cb " + }, + { + 
"type": "git_checkout_progress_cb", + "name": "progress_cb", + "comments": " Optional callback to notify the consumer of checkout progress. " + }, + { + "type": "void *", + "name": "progress_payload", + "comments": " Payload passed to progress_cb " + }, + { + "type": "git_strarray", + "name": "paths", + "comments": " A list of wildmatch patterns or paths.\n\n By default, all paths are processed. If you pass an array of wildmatch\n patterns, those will be used to filter which paths should be taken into\n account.\n\n Use GIT_CHECKOUT_DISABLE_PATHSPEC_MATCH to treat as a simple list." + }, + { + "type": "git_tree *", + "name": "baseline", + "comments": " The expected content of the working directory; defaults to HEAD.\n\n If the working directory does not match this baseline information,\n that will produce a checkout conflict." + }, + { + "type": "git_index *", + "name": "baseline_index", + "comments": " Like `baseline` above, though expressed as an index. This\n option overrides `baseline`." + }, + { + "type": "const char *", + "name": "target_directory", + "comments": " alternative checkout path to workdir " + }, + { + "type": "const char *", + "name": "ancestor_label", + "comments": " the name of the common ancestor side of conflicts " + }, + { + "type": "const char *", + "name": "our_label", + "comments": " the name of the \"our\" side of conflicts " + }, + { + "type": "const char *", + "name": "their_label", + "comments": " the name of the \"their\" side of conflicts " + }, + { + "type": "git_checkout_perfdata_cb", + "name": "perfdata_cb", + "comments": " Optional callback to notify the consumer of performance data. " + }, + { + "type": "void *", + "name": "perfdata_payload", + "comments": " Payload passed to perfdata_cb " + }, + { + "type": "git_strarray", + "name": "disabled_filters", + "comments": " A list filters to disable during checkout." 
+ } + ] } }, - "new" : { + "new": { "functions": { - "git_branch_remote_name": { + "git_libgit2_opts": { + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/libgit2/opts.cc", + "isAsync": false, + "isPrototypeMethod": false, + "group": "libgit2" + }, + "git_blame_file": { "type": "function", - "file": "branch.h", + "file": "blame.h", "args": [ { "name": "out", - "type": "git_buf *" + "type": "git_blame **" }, { "name": "repo", "type": "git_repository *" }, { - "name": "canonical_branch_name", + "name": "path", "type": "const char *" + }, + { + "name": "options", + "type": "git_blame_options *" } ], - "isAsync": true, + "group": "blame", "return": { "type": "int", "isErrorCode": true - }, - "group": "branch" + } }, "git_clone": { "isManual": true, @@ -151,6 +294,71 @@ "isErrorCode": true } }, + "git_email_create_from_diff": { + "file": "sys/email.h", + "type": "function", + "isAsync": true, + "group": "email", + "args": [ + { + "name": "out", + "type": "git_buf *" + }, + { + "name": "diff", + "type": "git_diff *" + }, + { + "name": "patch_idx", + "type": "size_t" + }, + { + "name": "patch_count", + "type": "size_t" + }, + { + "name": "commit_id", + "type": "const git_oid *" + }, + { + "name": "summary", + "type": "const char *" + }, + { + "name": "body", + "type": "const char *" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "opts", + "type": "git_email_create_options *" + } + ], + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_diff_get_perfdata": { + "file": "sys/diff.h", + "args": [ + { + "name": "out", + "type": "git_diff_perfdata *" + }, + { + "name": "diff", + "type": "const git_diff *" + } + ], + "return": { + "type": "int" + }, + "group": "diff" + }, "git_filter_list_load": { "isManual": true, "cFile": "generate/templates/manual/filter_list/load.cc", @@ -158,93 +366,1049 @@ "isPrototypeMethod": false, "group": "filter_list" }, - "git_patch_convenient_from_diff": { + 
"git_filter_source_filemode": { + "type": "function", + "file": "sys/filter.h", + "args": [ + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "return": { + "type": "uint16_t" + }, + "group": "filter_source" + }, + "git_filter_source_flags": { + "type": "function", + "file": "sys/filter.h", + "args": [ + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "return": { + "type": "uint32_t" + }, + "group": "filter_source" + }, + "git_filter_source_id": { + "type": "function", + "file": "sys/filter.h", + "args": [ + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "return": { + "type": "const git_oid *" + }, + "group": "filter_source" + }, + "git_filter_source_mode": { + "type": "function", + "file": "sys/filter.h", + "args": [ + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "return": { + "type": "git_filter_mode_t" + }, + "group": "filter_source" + }, + "git_filter_source_path": { + "type": "function", + "file": "sys/filter.h", + "args": [ + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "return": { + "type": "const char *" + }, + "group": "filter_source" + }, + "git_filter_source_repo": { + "args": [ + { + "name": "out", + "type": "git_repository **" + }, + { + "name": "src", + "type": "const git_filter_source *" + } + ], + "isManual": true, + "cFile": "generate/templates/manual/filter_source/repo.cc", + "isAsync": true, + "isPrototypeMethod": true, + "type": "function", + "group": "filter_source", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_hashsig_compare": { + "type": "function", + "file": "sys/hashsig.h", + "args": [ + { + "name": "a", + "type": "const git_hashsig *" + }, + { + "name": "b", + "type": "const git_hashsig *" + } + ], + "return": { + "type": "int" + }, + "group": "hashsig" + }, + "git_hashsig_create": { + "type": "function", + "file": "sys/hashsig.h", + "args": [ + { + "name": "out", + "type": "git_hashsig **" + }, + { + "name": 
"buf", + "type": "const char *" + }, + { + "name": "buflen", + "type": "size_t" + }, + { + "name": "opts", + "type": "git_hashsig_option_t" + } + ], + "return": { + "type": "int" + }, + "group": "hashsig" + }, + "git_hashsig_create_fromfile": { + "type": "function", + "file": "sys/hashsig.h", + "args": [ + { + "name": "out", + "type": "git_hashsig **" + }, + { + "name": "path", + "type": "const char *" + }, + { + "name": "opts", + "type": "git_hashsig_option_t" + } + ], + "return": { + "type": "int" + }, + "group": "hashsig" + }, + "git_index_name_add": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "ancestor", + "type": "const char *" + }, + { + "name": "ours", + "type": "const char *" + }, + { + "name": "theirs", + "type": "const char *" + } + ], + "return": { + "type": "int" + }, + "group": "index_name_entry" + }, + "git_index_name_clear": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + } + ], + "return": { + "type": "int" + }, + "group": "index_name_entry" + }, + "git_index_name_entrycount": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + } + ], + "return": { + "type": "size_t" + }, + "group": "index_name_entry" + }, + "git_index_name_get_byindex": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "n", + "type": "size_t" + } + ], + "return": { + "type": "const git_index_name_entry *" + }, + "group": "index_name_entry" + }, + "git_index_reuc_add": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "path", + "type": "const char *" + }, + { + "name": "ancestor_mode", + "type": "int" + }, + { + "name": "ancestor_id", + "type": "const git_oid *" + }, + { + "name": "our_mode", + "type": "int" + }, + { + "name": 
"our_id", + "type": "const git_oid *" + }, + { + "name": "their_mode", + "type": "int" + }, + { + "name": "their_id", + "type": "const git_oid *" + } + ], + "return": { + "type": "int" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_clear": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + } + ], + "return": { + "type": "int" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_entrycount": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + } + ], + "return": { + "type": "size_t" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_find": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "at_pos", + "type": "size_t *" + }, + { + "name": "index", + "type": "git_index *" + }, + { + "name": "path", + "type": "const char *" + } + ], + "return": { + "type": "int" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_get_byindex": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "n", + "type": "size_t" + } + ], + "return": { + "type": "const git_index_reuc_entry *" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_get_bypath": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "path", + "type": "const char *" + } + ], + "return": { + "type": "const git_index_reuc_entry *" + }, + "group": "index_reuc_entry" + }, + "git_index_reuc_remove": { + "type": "function", + "file": "sys/index.h", + "args": [ + { + "name": "index", + "type": "git_index *" + }, + { + "name": "n", + "type": "size_t" + } + ], + "return": { + "type": "int" + }, + "group": "index_reuc_entry" + }, + "git_note_author": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note", + "type": "const git_note *" + } + ], + "return": { + "type": "const git_signature *" + 
}, + "group": "note" + }, + "git_note_commit_create": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "notes_commit_out", + "type": "git_oid *" + }, + { + "name": "notes_blob_out", + "type": "git_oid *" + }, + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "parent", + "type": "git_commit *" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "committer", + "type": "const git_signature *" + }, + { + "name": "oid", + "type": "const git_oid *" + }, + { + "name": "note", + "type": "const char *" + }, + { + "name": "allow_note_overwrite", + "type": "int" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_commit_iterator_new": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_note_iterator **" + }, + { + "name": "notes_commit", + "type": "git_commit *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_commit_read": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_note **" + }, + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_commit", + "type": "git_commit *" + }, + { + "name": "oid", + "type": "const git_oid *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_commit_remove": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "notes_commit_out", + "type": "git_oid *" + }, + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_commit", + "type": "git_commit *" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "committer", + "type": "const git_signature *" + }, + { + "name": "oid", + "type": "const git_oid *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_committer": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note", + "type": "const git_note *" + } + ], + "return": { + 
"type": "const git_signature *" + }, + "group": "note" + }, + "git_note_create": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_oid *" + }, + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_ref", + "type": "const char *" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "committer", + "type": "const git_signature *" + }, + { + "name": "oid", + "type": "const git_oid *" + }, + { + "name": "note", + "type": "const char *" + }, + { + "name": "force", + "type": "int" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_default_ref": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_buf *" + }, + { + "name": "repo", + "type": "git_repository *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_foreach": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_ref", + "type": "const char *" + }, + { + "name": "note_cb", + "type": "git_note_foreach_cb" + }, + { + "name": "payload", + "type": "void *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_free": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note", + "type": "git_note *" + } + ], + "return": { + "type": "void" + }, + "group": "note" + }, + "git_note_id": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note", + "type": "const git_note *" + } + ], + "return": { + "type": "const git_oid *" + }, + "group": "note" + }, + "git_note_iterator_free": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "it", + "type": "git_note_iterator *" + } + ], + "return": { + "type": "void" + }, + "group": "note" + }, + "git_note_iterator_new": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_note_iterator **" + }, + { + 
"name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_ref", + "type": "const char *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_message": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note", + "type": "const git_note *" + } + ], + "return": { + "type": "const char *" + }, + "group": "note" + }, + "git_note_next": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "note_id", + "type": "git_oid *" + }, + { + "name": "annotated_id", + "type": "git_oid *" + }, + { + "name": "it", + "type": "git_note_iterator *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_read": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "out", + "type": "git_note **" + }, + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_ref", + "type": "const char *" + }, + { + "name": "oid", + "type": "const git_oid *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_note_remove": { + "type": "function", + "file": "note.h", + "args": [ + { + "name": "repo", + "type": "git_repository *" + }, + { + "name": "notes_ref", + "type": "const char *" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "committer", + "type": "const git_signature *" + }, + { + "name": "oid", + "type": "const git_oid *" + } + ], + "return": { + "type": "int" + }, + "group": "note" + }, + "git_patch_convenient_from_diff": { + "args": [ + { + "name": "diff", + "type": "git_diff *" + }, + { + "name": "indexes", + "type": "std::vector" + }, + { + "name": "out", + "type": "std::vector *" + } + ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/patches/convenient_patches.cc", + "isAsync": true, + "isPrototypeMethod": false, + "group": "patch", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_path_is_gitfile": { + "type": "function", + "file": "sys/path.h", + 
"args": [ + { + "name": "path", + "type": "const char *" + }, + { + "name": "pathlen", + "type": "size_t" + }, + { + "name": "gitfile", + "type": "git_path_gitfile" + }, + { + "name": "fs", + "type": "git_path_fs" + } + ], + "return": { + "type": "int" + }, + "group": "path" + }, + "git_remote_reference_list": { + "args": [ + { + "name": "out", + "type": "std::vector *" + }, + { + "name": "remote", + "type": "git_remote *" + } + ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/remote/ls.cc", + "isAsync": true, + "isPrototypeMethod": true, + "group": "remote", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_repository__cleanup": { + "type": "function", + "file": "sys/repository.h", + "args": [ + { + "name": "repo", + "type": "git_repository *" + } + ], + "return": { + "type": "int" + }, + "group": "repository" + }, + "git_repository_get_references": { + "args": [ + { + "name": "out", + "type": "std::vector *" + }, + { + "name": "repo", + "type": "git_repository *" + } + ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/repository/get_references.cc", + "isAsync": true, + "isPrototypeMethod": true, + "group": "repository", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_repository_get_submodules": { + "args": [ + { + "name": "out", + "type": "std::vector *" + }, + { + "name": "repo", + "type": "git_repository *" + } + ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/repository/get_submodules.cc", + "isAsync": true, + "isPrototypeMethod": true, + "group": "repository", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_repository_get_remotes": { + "args": [ + { + "name": "out", + "type": "std::vector *" + }, + { + "name": "repo", + "type": "git_repository *" + } + ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/repository/get_remotes.cc", + "isAsync": true, + 
"isPrototypeMethod": true, + "group": "repository", + "return": { + "type": "int", + "isErrorCode": true + } + }, + "git_repository_refresh_references": { "args": [ { - "name": "diff", - "type": "git_diff *" + "name": "out", + "type": "void *" }, { - "name": "out", - "type": "std::vector *" + "name": "repo", + "type": "git_repository *" } ], "type": "function", "isManual": true, - "cFile": "generate/templates/manual/patches/convenient_patches.cc", + "cFile": "generate/templates/manual/repository/refresh_references.cc", "isAsync": true, - "isPrototypeMethod": false, - "group": "patch", + "isPrototypeMethod": true, + "group": "repository", "return": { "type": "int", "isErrorCode": true } }, - "git_rebase_next": { + "git_repository_set_index": { "type": "function", - "file": "rebase.h", + "file": "sys/repository.h", "args": [ { - "name": "out", - "type": "git_rebase_operation **" + "name": "repo", + "type": "git_repository *" }, { - "name": "rebase", - "type": "git_rebase *" + "name": "index", + "type": "git_index *" } ], "return": { "type": "int" }, - "group": "rebase" + "group": "repository" }, - "git_remote_reference_list": { + "git_repository_statistics": { "args": [ { "name": "out", - "type": "std::vector *" + "type": "void *" }, { - "name": "remote", - "type": "git_remote *" + "name": "repo", + "type": "git_repository *" } ], "type": "function", "isManual": true, - "cFile": "generate/templates/manual/remote/ls.cc", + "cFile": "generate/templates/manual/repository/statistics.cc", "isAsync": true, "isPrototypeMethod": true, - "group": "remote", + "group": "repository", "return": { "type": "int", "isErrorCode": true } }, - "git_reset": { + "git_repository_submodule_cache_all": { + "type": "function", + "file": "sys/repository.h", + "args": [ + { + "name": "repo", + "type": "git_repository *" + } + ], + "return": { + "type": "int" + }, + "group": "repository" + }, + "git_repository_submodule_cache_clear": { "type": "function", - "file": "reset.h", + "file": 
"sys/repository.h", "args": [ { "name": "repo", "type": "git_repository *" + } + ], + "return": { + "type": "int" + }, + "group": "repository" + }, + "git_revwalk_commit_walk": { + "args": [ + { + "name": "max_count", + "type": "int" }, { - "name": "target", - "type": "git_object *" + "name": "out", + "type": "void *" }, { - "name": "reset_type", - "type": "git_reset_t" + "name": "returnPlainObjects", + "type": "bool" }, { - "name": "checkout_opts", - "type": "git_checkout_options *" + "name": "walk", + "type": "git_revwalk *" } ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/revwalk/commit_walk.cc", + "isAsync": true, + "isPrototypeMethod": true, + "group": "revwalk", "return": { - "type": "int" - }, - "group": "reset" + "type": "int", + "isErrorCode": true + } }, "git_revwalk_fast_walk": { "args": [ @@ -280,11 +1444,11 @@ }, { "name": "max_count", - "type": "int" + "type": "unsigned int" }, { "name": "out", - "type": "std::vector< std::pair > *> *" + "type": "std::vector *" }, { "name": "walk", @@ -302,35 +1466,48 @@ "isErrorCode": true } }, - "git_stash_save": { - "type": "function", - "file": "stash.h", + "git_status_list_get_perfdata": { + "file": "sys/diff.h", "args": [ { "name": "out", - "type": "git_oid *" + "type": "git_diff_perfdata *" }, { - "name": "repo", - "type": "git_repository *" - }, + "name": "status", + "type": "const git_status_list *" + } + ], + "return": { + "type": "int" + }, + "group": "status_list" + }, + "git_tree_get_all_filepaths": { + "args": [ { - "name": "stasher", - "type": "const git_signature *" + "name": "tree", + "type": "git_tree *" }, { - "name": "message", - "type": "const char *" + "name": "repo", + "type": "git_repository *" }, { - "name": "flags", - "type": "unsigned int" + "name": "out", + "type": "std::vector *" } ], + "type": "function", + "isManual": true, + "cFile": "generate/templates/manual/tree/get_all_filepaths.cc", + "isAsync": true, + "isPrototypeMethod": true, + "group": 
"tree", "return": { - "type": "int" - }, - "group": "stash" + "type": "int", + "isErrorCode": true + } } }, "groups": [ @@ -342,7 +1519,24 @@ "git_annotated_commit_from_ref", "git_annotated_commit_from_revspec", "git_annotated_commit_id", - "git_annotated_commit_lookup" + "git_annotated_commit_lookup", + "git_annotated_commit_ref" + ] + ], + [ + "config_iterator", + [ + "git_config_iterator_free", + "git_config_iterator_new", + "git_config_iterator_glob_new", + "git_config_multivar_iterator_new", + "git_config_next" + ] + ], + [ + "diff", + [ + "git_diff_get_perfdata" ] ], [ @@ -376,6 +1570,51 @@ "git_filter_source_flags" ] ], + [ + "hashsig", + [ + "git_hashsig_compare", + "git_hashsig_create", + "git_hashsig_create_fromfile" + ] + ], + [ + "index_conflict_iterator", + [ + "git_index_conflict_iterator_free", + "git_index_conflict_iterator_new", + "git_index_conflict_next" + ] + ], + [ + "index_iterator", + [ + "git_index_iterator_free", + "git_index_iterator_new", + "git_index_iterator_next" + ] + ], + [ + "index_name_entry", + [ + "git_index_name_add", + "git_index_name_clear", + "git_index_name_entrycount", + "git_index_name_get_byindex" + ] + ], + [ + "index_reuc_entry", + [ + "git_index_reuc_add", + "git_index_reuc_clear", + "git_index_reuc_entrycount", + "git_index_reuc_find", + "git_index_reuc_get_byindex", + "git_index_reuc_get_bypath", + "git_index_reuc_remove" + ] + ], [ "merge_file_result", [ @@ -416,6 +1655,12 @@ "git_patch_convenient_from_diff" ] ], + [ + "path", + [ + "git_path_is_gitfile" + ] + ], [ "pathspec_match_list", [ @@ -442,9 +1687,24 @@ "git_remote_reference_list" ] ], + [ + "repository", + [ + "git_repository__cleanup", + "git_repository_get_references", + "git_repository_get_submodules", + "git_repository_get_remotes", + "git_repository_refresh_references", + "git_repository_set_index", + "git_repository_statistics", + "git_repository_submodule_cache_all", + "git_repository_submodule_cache_clear" + ] + ], [ "revwalk", [ + 
"git_revwalk_commit_walk", "git_revwalk_fast_walk", "git_revwalk_file_history_walk" ] @@ -458,6 +1718,12 @@ "git_status_list_new" ] ], + [ + "tree", + [ + "git_tree_get_all_filepaths" + ] + ], [ "tree_entry", [ @@ -473,49 +1739,145 @@ ], "types": [ [ - "git_stash_apply_progress_t", + "git_apply_options", + { + "type": "struct", + "fields": [ + { + "type": "unsigned int", + "name": "version" + }, + { + "type": "git_apply_delta_cb", + "name": "delta_cb" + }, + { + "type": "git_apply_hunk_cb", + "name": "hunk_cb" + }, + { + "type": "void *", + "name": "payload" + } + ], + "used": { + "needs": [ + "git_apply_init_options" + ] + } + } + ], + [ + "git_blame_hunk", + { + "type": "struct", + "fields": [ + { + "name": "lines_in_hunk", + "type": "int" + }, + { + "name": "final_commit_id", + "type": "git_oid" + }, + { + "name": "final_start_line_number", + "type": "size_t" + }, + { + "name": "final_signature", + "type": "git_signature *" + }, + { + "name": "final_committer", + "type": "git_signature *" + }, + { + "name": "orig_commit_id", + "type": "git_oid" + }, + { + "name": "orig_path", + "type": "const char *" + }, + { + "name": "orig_start_line_number", + "type": "size_t" + }, + { + "name": "orig_signature", + "type": "git_signature *" + }, + { + "name": "orig_committer", + "type": "git_signature *" + }, + { + "name": "summary", + "type": "const char *" + }, + { + "name": "boundary", + "type": "char" + } + ] + } + ], + [ + "git_blob_filter_options", + { + "type": "struct", + "fields": [ + { + "name": "version", + "type": "int" + }, + { + "name": "flags", + "type": "uint32_t" + } + ] + } + ], + [ + "git_cert_ssh_raw_type_t", { "type": "enum", "fields": [ { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_NONE", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_UNKNOWN", "value": 0 }, { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_LOADING_STASH", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_RSA", "value": 1 }, { - "type": "int", - "name": 
"GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_DSS", "value": 2 }, { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_256", "value": 3 }, { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_384", "value": 4 }, { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_521", "value": 5 }, { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED", + "type": "uint32_t", + "name": "GIT_CERT_SSH_RAW_TYPE_KEY_ED25519", "value": 6 - }, - { - "type": "int", - "name": "GIT_STASH_APPLY_PROGRESS_DONE", - "value": 7 } ] } @@ -544,6 +1906,40 @@ ] } ], + [ + "git_commit_create_options", + { + "decl": [ + "unsigned int version", + "unsigned int allow_empty_commit : 1", + "const git_signature *author", + "const git_signature *committer", + "const char *message_encoding" + ], + "fields": [ + { + "name": "version", + "type": "unsigned int" + }, + { + "name": "allow_empty_commit", + "type": "unsigned int" + }, + { + "name": "author", + "type": "const git_signature *" + }, + { + "name": "committer", + "type": "const git_signature *" + }, + { + "name": "message_encoding", + "type": "const char *" + } + ] + } + ], [ "git_describe_format_options", { @@ -610,6 +2006,27 @@ } } ], + [ + "git_diff_perfdata", + { + "type": "struct", + "fields": [ + { + "type": "unsigned int", + "name": "version", + "ignore": true + }, + { + "type": "size_t", + "name": "stat_calls" + }, + { + "type": "size_t", + "name": "oid_calculations" + } + ] + } + ], [ "git_filter", { @@ -646,45 +2063,6 @@ ] } ], - [ - "git_status_entry", - { - "fields": [ - { - "type": "git_status_t", - "name": "status" - }, - { - "type": "git_diff_delta *", - "name": "head_to_index" - }, - { - "type": "git_diff_delta 
*", - "name": "index_to_workdir" - } - ] - } - ], - [ - "git_diff_perfdata", - { - "type": "struct", - "fields": [ - { - "type": "unsigned int", - "name": "version" - }, - { - "type": "size_t", - "name": "stat_calls" - }, - { - "type": "size_t", - "name": "oid_calculations" - } - ] - } - ], [ "git_fetch_options", { @@ -751,44 +2129,56 @@ } ], [ - "git_off_t", + "git_hashsig", { - "type": "enum" + "type": "struct", + "fields": [] } ], [ - "git_rebase_options", + "git_index_name_entry", { "type": "struct", "fields": [ { - "type": "unsigned int", - "name": "version" + "type": "char *", + "name": "ancestor" }, { - "type": "int", - "name": "quiet" + "type": "char *", + "name": "ours" }, { - "type": "const char *", - "name": "rewrite_notes_ref" + "type": "char *", + "name": "theirs" + } + ] + } + ], + [ + "git_index_reuc_entry", + { + "type": "struct", + "fields": [ + { + "type": "uint32_t [3]", + "name": "mode" }, { - "type": "git_checkout_options", - "name": "checkout_options" + "type": "git_oid [3]", + "name": "oid" }, { - "type": "git_merge_options", - "name": "merge_options" + "type": "char *", + "name": "path" } - ], - "used": { - "needs": [ - "git_rebase_init_options", - "git_checkout_init_options", - "git_merge_init_options" - ] - } + ] + } + ], + [ + "git_off_t", + { + "type": "enum" } ], [ @@ -811,7 +2201,7 @@ "name": "sideband_progress" }, { - "type": "git_cred_acquire_cb", + "type": "git_credential_acquire_cb", "name": "credentials" }, { @@ -819,11 +2209,11 @@ "name": "certificate_check" }, { - "type": "git_transfer_progress_cb", + "type": "git_indexer_progress_cb", "name": "transfer_progress" }, { - "type": "git_push_transfer_progress", + "type": "git_push_transfer_progress_cb", "name": "push_transfer_progress", "isCallback": true }, @@ -839,6 +2229,10 @@ { "type": "void *", "name": "payload" + }, + { + "type": "git_url_resolve_cb", + "name": "resolve_url" } ], "used": { @@ -848,6 +2242,39 @@ } } ], + [ + "git_remote_create_options", + { + "type": "struct", 
+ "fields": [ + { + "type": "unsigned int", + "name": "version" + }, + { + "type": "git_repository *", + "name": "repository" + }, + { + "type": "const char *", + "name": "name" + }, + { + "type": "const char *", + "name": "fetchspec" + }, + { + "type": "unsigned int", + "name": "flags" + } + ], + "used": { + "needs": [ + "git_remote_create_init_options" + ] + } + } + ], [ "git_remote_head", { @@ -882,15 +2309,108 @@ } ], [ - "git_time_t", + "git_path_gitfile", { - "type": "enum" + "type": "enum", + "fields": [ + { + "type": "int", + "name": "GIT_PATH_GITFILE_GITIGNORE", + "value": 0 + }, + { + "type": "int", + "name": "GIT_PATH_GITFILE_GITMODULES", + "value": 1 + }, + { + "type": "int", + "name": "GIT_PATH_GITFILE_GITATTRIBUTES", + "value": 1 + } + ] } ], [ - "git_trace_level_t", + "git_stash_apply_progress_t", { - "type": "enum" + "type": "enum", + "fields": [ + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_NONE", + "value": 0 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_LOADING_STASH", + "value": 1 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_INDEX", + "value": 2 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_MODIFIED", + "value": 3 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_ANALYZE_UNTRACKED", + "value": 4 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_UNTRACKED", + "value": 5 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_CHECKOUT_MODIFIED", + "value": 6 + }, + { + "type": "int", + "name": "GIT_STASH_APPLY_PROGRESS_DONE", + "value": 7 + } + ] + } + ], + [ + "git_stash_apply_options", + { + "type": "struct", + "fields": [ + { + "type": "unsigned int", + "name": "version" + }, + { + "type": "uint32_t", + "name": "flags" + }, + { + "type": "git_checkout_options", + "name": "checkout_options" + }, + { + "type": "git_stash_apply_progress_cb", + "name": "progress_cb" + }, + { + "type": "void *", + "name": "progress_payload" + } + ], + 
"used": { + "needs": [ + "git_stash_apply_init_options", + "git_checkout_init_options" + ] + } } ], [ @@ -925,7 +2445,13 @@ } ], [ - "git_stash_apply_options", + "git_trace_level_t", + { + "type": "enum" + } + ], + [ + "git_worktree_add_options", { "type": "struct", "fields": [ @@ -934,32 +2460,19 @@ "name": "version" }, { - "type": "git_stash_apply_flags", - "name": "flags" - }, - { - "type": "git_checkout_options", - "name": "checkout_options" - }, - { - "type": "git_stash_apply_progress_cb", - "name": "progress_cb" - }, - { - "type": "void *", - "name": "progress_payload" + "type": "int", + "name": "lock" } ], "used": { "needs": [ - "git_stash_apply_init_options", - "git_checkout_init_options" + "git_worktree_add_options_init" ] } } ], [ - "git_worktree_add_options", + "git_worktree_prune_options", { "type": "struct", "fields": [ @@ -968,10 +2481,15 @@ "name": "version" }, { - "type": "int", - "name": "lock" + "type": "uint32_t", + "name": "flags" } - ] + ], + "used": { + "needs": [ + "git_worktree_prune_options_init" + ] + } } ], [ @@ -996,22 +2514,6 @@ } ] } - ], - [ - "git_worktree_prune_options", - { - "type": "struct", - "fields": [ - { - "type": "unsigned int", - "name": "version" - }, - { - "type": "uint32_t", - "name": "flags" - } - ] - } ] ] }, @@ -1023,7 +2525,17 @@ "git_annotated_commit_from_ref", "git_annotated_commit_from_revspec", "git_annotated_commit_id", - "git_annotated_commit_lookup" + "git_annotated_commit_lookup", + "git_annotated_commit_ref" + ] + }, + "config": { + "functions": [ + "git_config_iterator_free", + "git_config_iterator_new", + "git_config_iterator_glob_new", + "git_config_multivar_iterator_new", + "git_config_next" ] }, "diff": { @@ -1035,11 +2547,33 @@ "git_diff_stats_free" ] }, + "index": { + "functions": [ + "git_index_conflict_iterator_free", + "git_index_conflict_iterator_new", + "git_index_conflict_next", + "git_index_iterator_free", + "git_index_iterator_new", + "git_index_iterator_next", + "git_index_name_add", + 
"git_index_name_clear", + "git_index_name_entrycount", + "git_index_name_get_byindex", + "git_index_reuc_add", + "git_index_reuc_clear", + "git_index_reuc_entrycount", + "git_index_reuc_find", + "git_index_reuc_get_byindex", + "git_index_reuc_get_bypath", + "git_index_reuc_remove" + ] + }, "merge": { "functions": [ "git_merge_driver_lookup", "git_merge_driver_register", "git_merge_driver_source_ancestor", + "git_merge_driver_source_file_options", "git_merge_driver_source_ours", "git_merge_driver_source_repo", "git_merge_driver_source_theirs", @@ -1059,6 +2593,10 @@ }, "odb": { "functions": [ + "git_odb_backend_loose", + "git_odb_backend_malloc", + "git_odb_backend_one_pack", + "git_odb_backend_pack", "git_odb_object_data", "git_odb_object_dup", "git_odb_object_free", @@ -1090,6 +2628,8 @@ }, "reflog": { "functions": [ + "git_reflog_entry__alloc", + "git_reflog_entry__free", "git_reflog_entry_committer", "git_reflog_entry_id_new", "git_reflog_entry_id_old", @@ -1118,11 +2658,30 @@ } }, "groups": { - "branch": [ - "git_branch_remote_name" + "blame": [ + "git_blame_file" + ], + "email": [ + "git_email_create_from_diff" ], - "stash": [ - "git_stash_save" + "note": [ + "git_note_author", + "git_note_commit_create", + "git_note_commit_iterator_new", + "git_note_commit_read", + "git_note_commit_remove", + "git_note_committer", + "git_note_create", + "git_note_default_ref", + "git_note_foreach", + "git_note_free", + "git_note_id", + "git_note_iterator_free", + "git_note_iterator_new", + "git_note_message", + "git_note_next", + "git_note_read", + "git_note_remove" ] } -} +} \ No newline at end of file diff --git a/generate/scripts/generateNativeCode.js b/generate/scripts/generateNativeCode.js index 95e0ab8754..44fb21dec5 100644 --- a/generate/scripts/generateNativeCode.js +++ b/generate/scripts/generateNativeCode.js @@ -26,6 +26,7 @@ module.exports = function generateNativeCode() { var partials = { asyncFunction: utils.readLocalFile("templates/partials/async_function.cc"), 
callbackHelpers: utils.readLocalFile("templates/partials/callback_helpers.cc"), + configurableCallbacks: utils.readLocalFile("templates/partials/configurable_callbacks.cc"), convertFromV8: utils.readLocalFile("templates/partials/convert_from_v8.cc"), convertToV8: utils.readLocalFile("templates/partials/convert_to_v8.cc"), doc: utils.readLocalFile("templates/partials/doc.cc"), @@ -50,11 +51,18 @@ module.exports = function generateNativeCode() { var filters = { and: require("../templates/filters/and"), argsInfo: require("../templates/filters/args_info"), + arrayTypeToPlainType: require("../templates/filters/array_type_to_plain_type"), + asElementPointer: require("../templates/filters/as_element_pointer"), + callbackArgsInfo: require("../templates/filters/callback_args_info"), + callbackArgsCount: require("../templates/filters/callback_args_count"), cppToV8: require("../templates/filters/cpp_to_v8"), defaultValue: require("../templates/filters/default_value"), fieldsInfo: require("../templates/filters/fields_info"), + getCPPFunctionForRootProto: require("../templates/filters/get_cpp_function_for_root_proto"), + hasFunctionOnRootProto: require("../templates/filters/has_function_on_root_proto"), hasReturnType: require("../templates/filters/has_return_type"), hasReturnValue: require("../templates/filters/has_return_value"), + isArrayType: require("../templates/filters/is_array_type"), isDoublePointer: require("../templates/filters/is_double_pointer"), isFixedLengthString: require("../templates/filters/is_fixed_length_string"), isOid: require("../templates/filters/is_oid"), @@ -68,8 +76,10 @@ module.exports = function generateNativeCode() { returnsCount: require("../templates/filters/returns_count"), returnsInfo: require("../templates/filters/returns_info"), subtract: require("../templates/filters/subtract"), + thisInfo: require("../templates/filters/this_info"), titleCase: require("../templates/filters/title_case"), toBool: require('../templates/filters/to_bool'), + 
toSizeOfArray: require("../templates/filters/to_size_of_array"), unPointer: require("../templates/filters/un_pointer"), setUnsigned: require("../templates/filters/unsigned"), upper: require("../templates/filters/upper") diff --git a/generate/scripts/helpers.js b/generate/scripts/helpers.js index b9a67dc06c..e278e1cc05 100644 --- a/generate/scripts/helpers.js +++ b/generate/scripts/helpers.js @@ -24,7 +24,9 @@ var cTypeMappings = { "uint16_t": "Number", "uint32_t": "Number", "uint64_t": "Number", - "double": "Number" + "double": "Number", + "git_object_size_t": "Number", + "git_time_t": "Number", } var collisionMappings = { @@ -40,6 +42,7 @@ var Helpers = { .replace("struct", "") .replace(utils.doublePointerRegex, "") .replace(utils.pointerRegex, "") + .replace(utils.arrayTypeRegex, "") .trim(); }, @@ -68,15 +71,23 @@ var Helpers = { }, isConstructorFunction: function(cType, fnName) { - var initFnName = cType.split('_'); + var deprecatedInitFnName = cType.split("_"); + deprecatedInitFnName.splice(-1, 0, "init"); + deprecatedInitFnName = deprecatedInitFnName.join("_"); - initFnName.splice(-1, 0, "init"); - initFnName = initFnName.join('_'); + var initFnName = cType + "_init"; - return initFnName === fnName; + return initFnName === fnName || deprecatedInitFnName === fnName; }, hasConstructor: function(type, normalizedType) { + if (normalizedType && descriptor.types[normalizedType.substr(4)]) { + var descriptorEntry = descriptor.types[normalizedType.substr(4)]; + if (descriptorEntry.hasOwnProperty('hasConstructor')) { + return descriptorEntry.hasConstructor; + } + } + return type.used && type.used.needs && type.used.needs.some(function (fnName) { @@ -156,7 +167,9 @@ var Helpers = { if (libgitType) { type.isLibgitType = true; type.isEnum = libgitType.type === "enum"; - type.hasConstructor = Helpers.hasConstructor(type, normalizedType); + type.hasConstructor = Helpers.hasConstructor(libgitType, normalizedType); + type.isClassType = !type.isEnum && !type.hasConstructor; + 
type.isStructType = !type.isEnum && !!type.hasConstructor; // there are no enums at the struct level currently, but we still need to override function args if (type.isEnum) { @@ -167,6 +180,8 @@ var Helpers = { } } + type.freeFunctionName = libgitType.freeFunctionName; + // we don't want to overwrite the c type of the passed in type _.merge(type, descriptor.types[normalizedType.replace("git_", "")] || {}, { cType: type.cType }); } diff --git a/generate/scripts/utils.js b/generate/scripts/utils.js index e618a954a1..c6e843134a 100644 --- a/generate/scripts/utils.js +++ b/generate/scripts/utils.js @@ -9,6 +9,7 @@ const path = require("path"); var local = path.join.bind(null, __dirname, "../"); var util = { + arrayTypeRegex: /\[\d*\]\s*/, pointerRegex: /\s*\*\s*/, doublePointerRegex: /\s*\*\*\s*/, @@ -119,29 +120,35 @@ var util = { }, syncDirs: function(fromDir, toDir) { + let toFilePaths; + let fromFilePaths; return Promise.all([ util.getFilePathsRelativeToDir(toDir), util.getFilePathsRelativeToDir(fromDir) - ]).then(function(filePaths) { - const toFilePaths = filePaths[0]; - const fromFilePaths = filePaths[1]; - - // Delete files that aren't in fromDir - toFilePaths.forEach(function(filePath) { - if (!util.isFile(path.join(fromDir, filePath))) { - fse.remove(path.join(toDir, filePath)); - } + ]) + .then(function(filePaths) { + toFilePaths = filePaths[0]; + fromFilePaths = filePaths[1]; + + // Delete files that aren't in fromDir + return Promise.all(toFilePaths.map(function(filePath) { + if (!util.isFile(path.join(fromDir, filePath))) { + return fse.remove(path.join(toDir, filePath)); + } + return Promise.resolve(); + })); + }) + .then(function() { + // Copy files that don't exist in toDir or have different contents + return Promise.all(fromFilePaths.map(function(filePath) { + const toFilePath = path.join(toDir, filePath); + const fromFilePath = path.join(fromDir, filePath); + if (!util.isFile(toFilePath) || util.readFile(toFilePath) !== util.readFile(fromFilePath)) { 
+ return fse.copy(fromFilePath, toFilePath); + } + return Promise.resolve(); + })); }); - - // Copy files that don't exist in toDir or have different contents - fromFilePaths.forEach(function(filePath) { - const toFilePath = path.join(toDir, filePath); - const fromFilePath = path.join(fromDir, filePath); - if (!util.isFile(toFilePath) || util.readFile(toFilePath) !== util.readFile(fromFilePath)) { - fse.copy(fromFilePath, toFilePath); - } - }); - }); } }; diff --git a/generate/templates/filters/args_info.js b/generate/templates/filters/args_info.js index 0c05c30ebe..55270102d1 100644 --- a/generate/templates/filters/args_info.js +++ b/generate/templates/filters/args_info.js @@ -1,3 +1,11 @@ +var bannedCppClassNames = [ + "Buffer", + "Function", + "GitBuf", + "GitStrarray", + "Wrapper" +]; + module.exports = function(args) { var result = [], cArg, @@ -20,6 +28,9 @@ module.exports = function(args) { arg.isCppClassStringOrArray = ~["String", "Array"].indexOf(arg.cppClassName); arg.isConst = ~arg.cType.indexOf("const "); + arg.isUnwrappable = !arg.isStructType && arg.isLibgitType && !arg.isEnum && + !bannedCppClassNames.includes(arg.cppClassName); + // if we have a callback then we also need the corresponding payload for that callback if (arg.isCallbackFunction) { var payload = args.filter(function(payload) { diff --git a/generate/templates/filters/array_type_to_plain_type.js b/generate/templates/filters/array_type_to_plain_type.js new file mode 100644 index 0000000000..55f2833500 --- /dev/null +++ b/generate/templates/filters/array_type_to_plain_type.js @@ -0,0 +1,3 @@ +module.exports = function(cType) { + return /(.*)\s\[\d+\]\s*/.exec(cType)[1]; +}; diff --git a/generate/templates/filters/as_element_pointer.js b/generate/templates/filters/as_element_pointer.js new file mode 100644 index 0000000000..8b34eed172 --- /dev/null +++ b/generate/templates/filters/as_element_pointer.js @@ -0,0 +1,7 @@ +const isArrayType = require("./is_array_type"); + +module.exports = 
function(cType, parsedName) { + return isArrayType(cType) ? + "&" + parsedName + "[i]" : + parsedName; +}; diff --git a/generate/templates/filters/callback_args_count.js b/generate/templates/filters/callback_args_count.js new file mode 100644 index 0000000000..26c7762ead --- /dev/null +++ b/generate/templates/filters/callback_args_count.js @@ -0,0 +1,18 @@ +module.exports = function(args) { + if (!args) { + return 0; + } + + return args.reduce( + function(count, arg) { + var shouldCount = !arg.isReturn && + !arg.isSelf && + arg.name !== "payload" && + arg.name !== "self" && + !arg.ignore; + + return shouldCount ? count + 1 : count; + }, + 0 + ); +}; diff --git a/generate/templates/filters/callback_args_info.js b/generate/templates/filters/callback_args_info.js new file mode 100644 index 0000000000..a7285c0b85 --- /dev/null +++ b/generate/templates/filters/callback_args_info.js @@ -0,0 +1,27 @@ +module.exports = function(args) { + var result = args.reduce( + function(argList, arg) { + var useArg = !arg.isReturn && + !arg.isSelf && + arg.name !== "payload" && + arg.name !== "self" && + !arg.ignore; + + if (!useArg) { + return argList; + } + + arg.firstArg = argList.length === 0; + argList.push(arg); + + return argList; + }, + [] + ); + + if (result.length) { + result[result.length - 1].lastArg = true; + } + + return result; +}; diff --git a/generate/templates/filters/fields_info.js b/generate/templates/filters/fields_info.js index 7e1e1211b4..6022aab127 100644 --- a/generate/templates/filters/fields_info.js +++ b/generate/templates/filters/fields_info.js @@ -1,15 +1,26 @@ +var bannedCppClassNames = [ + "Buffer", + "Function", + "GitBuf", + "GitStrarray", + "Wrapper" +]; + module.exports = function(fields) { var result = []; - fields.forEach(function (field){ + fields.forEach(function (field, index){ var fieldInfo = {}; fieldInfo.__proto__ = field; + fieldInfo.index = index; fieldInfo.parsedName = field.name || "result"; fieldInfo.isCppClassIntType = ~["Uint32", 
"Int32"].indexOf(field.cppClassName); fieldInfo.parsedClassName = (field.cppClassName || '').toLowerCase() + "_t"; fieldInfo.hasOwner = !fieldInfo.selfOwned && !!fieldInfo.ownedByThis; + fieldInfo.isUnwrappable = fieldInfo.isLibgitType && !fieldInfo.isEnum && + !bannedCppClassNames.includes(fieldInfo.cppClassName); result.push(fieldInfo); }); diff --git a/generate/templates/filters/get_cpp_function_for_root_proto.js b/generate/templates/filters/get_cpp_function_for_root_proto.js new file mode 100644 index 0000000000..6571af880a --- /dev/null +++ b/generate/templates/filters/get_cpp_function_for_root_proto.js @@ -0,0 +1,12 @@ +module.exports = function(functions) { + if (!functions || functions.length === 0) { + throw new Error("Should not be able to get function from empty function list"); + } + + const fun = functions.find(function(f) { return f.useAsOnRootProto; }); + if (!fun) { + throw new Error("There is no function on the root prototype for this collection"); + } + + return fun.cppFunctionName; +}; diff --git a/generate/templates/filters/has_function_on_root_proto.js b/generate/templates/filters/has_function_on_root_proto.js new file mode 100644 index 0000000000..626ce0ff65 --- /dev/null +++ b/generate/templates/filters/has_function_on_root_proto.js @@ -0,0 +1,7 @@ +module.exports = function(functions) { + if (!functions || functions.length === 0) { + return false; + } + + return functions.some(function(f) { return f.useAsOnRootProto; }); +}; diff --git a/generate/templates/filters/is_array_type.js b/generate/templates/filters/is_array_type.js new file mode 100644 index 0000000000..d633d9e407 --- /dev/null +++ b/generate/templates/filters/is_array_type.js @@ -0,0 +1,3 @@ +module.exports = function(cType) { + return /\s\[\d+\]\s*/.test(cType); +}; diff --git a/generate/templates/filters/js_args_count.js b/generate/templates/filters/js_args_count.js index 5be437f417..17a56a1c11 100644 --- a/generate/templates/filters/js_args_count.js +++ 
b/generate/templates/filters/js_args_count.js @@ -5,11 +5,11 @@ module.exports = function(args) { if (!args) { return 0; } - + for(cArg = 0, jsArg = 0; cArg < args.length; cArg++) { var arg = args[cArg]; - if (!arg.isReturn && !arg.isSelf && !arg.isPayload) { + if (!arg.isReturn && !arg.isSelf) { jsArg++; } } diff --git a/generate/templates/filters/returns_info.js b/generate/templates/filters/returns_info.js index 2d178e2c15..5f7bcdc08a 100644 --- a/generate/templates/filters/returns_info.js +++ b/generate/templates/filters/returns_info.js @@ -52,7 +52,7 @@ module.exports = function(fn, argReturnsOnly, isAsync) { // sync functions will need to know this. if (!isAsync && return_info.ownedBy) { return_info.ownedBy.forEach(function (argName) { - return_info.ownedByIndices.push(nameToArgIndex[return_info.ownedBy]); + return_info.ownedByIndices.push(nameToArgIndex[argName]); }) } @@ -84,7 +84,11 @@ module.exports = function(fn, argReturnsOnly, isAsync) { }); } - return_info.parsedName = return_info.name && isAsync ? 
"baton->" + return_info.name : "result"; + if (isAsync) { + return_info.parsedName = "baton->" + (return_info.name || "result"); + } else { + return_info.parsedName = "result"; + } return_info.isCppClassIntType = ~['Uint32', 'Int32'].indexOf(return_info.cppClassName); return_info.parsedClassName = (return_info.cppClassName || '').toLowerCase() + "_t"; return_info.returnNameOrName = return_info.returnName || return_info.name; diff --git a/generate/templates/filters/this_info.js b/generate/templates/filters/this_info.js new file mode 100644 index 0000000000..e5d57520ab --- /dev/null +++ b/generate/templates/filters/this_info.js @@ -0,0 +1,8 @@ +module.exports = function(args, fieldToRetrieve) { + const thisArg = args.find(arg => arg.isSelf); + if (thisArg) { + return thisArg[fieldToRetrieve]; + } + + return; +}; diff --git a/generate/templates/filters/to_size_of_array.js b/generate/templates/filters/to_size_of_array.js new file mode 100644 index 0000000000..b56e9315fa --- /dev/null +++ b/generate/templates/filters/to_size_of_array.js @@ -0,0 +1,3 @@ +module.exports = function(cType) { + return /\s\[(\d+)\]\s*/.exec(cType)[1]; +}; diff --git a/generate/templates/manual/clone/clone.cc b/generate/templates/manual/clone/clone.cc index 7ddcd559b4..02c47ff58b 100644 --- a/generate/templates/manual/clone/clone.cc +++ b/generate/templates/manual/clone/clone.cc @@ -21,11 +21,26 @@ NAN_METHOD(GitClone::Clone) { return Nan::ThrowError("String local_path is required."); } - if (info.Length() == 3 || !info[3]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - CloneBaton *baton = new CloneBaton; + CloneBaton *baton = new CloneBaton(); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + std::map> cleanupHandles; + + if (info[2]->IsNull() || info[2]->IsUndefined()) { + baton->options = nullptr; + } else { + auto conversionResult = 
ConfigurableGitCloneOptions::fromJavascript(nodegitContext, info[2]); + if (!conversionResult.result) { + return Nan::ThrowError(Nan::New(conversionResult.error).ToLocalChecked()); + } + + auto convertedObject = conversionResult.result; + cleanupHandles["options"] = convertedObject; + baton->options = convertedObject->GetValue(); + } baton->error_code = GIT_OK; baton->error = NULL; @@ -33,7 +48,7 @@ NAN_METHOD(GitClone::Clone) { // start convert_from_v8 block const char *from_url = NULL; - String::Utf8Value url(info[0]->ToString()); + Nan::Utf8String url(Nan::To(info[0]).ToLocalChecked()); // malloc with one extra byte so we can add the terminating null character // C-strings expect: from_url = (const char *)malloc(url.length() + 1); @@ -50,7 +65,7 @@ NAN_METHOD(GitClone::Clone) { // start convert_from_v8 block const char *from_local_path = NULL; - String::Utf8Value local_path(info[1]->ToString()); + Nan::Utf8String local_path(Nan::To(info[1]).ToLocalChecked()); // malloc with one extra byte so we can add the terminating null character // C-strings expect: from_local_path = (const char *)malloc(local_path.length() + 1); @@ -64,59 +79,67 @@ NAN_METHOD(GitClone::Clone) { memset((void *)(((char *)from_local_path) + local_path.length()), 0, 1); // end convert_from_v8 block baton->local_path = from_local_path; - // start convert_from_v8 block - const git_clone_options *from_options = NULL; - if (info[2]->IsObject()) { - from_options = Nan::ObjectWrap::Unwrap(info[2]->ToObject()) - ->GetValue(); - } else { - from_options = 0; - } - // end convert_from_v8 block - baton->options = from_options; Nan::Callback *callback = - new Nan::Callback(v8::Local::Cast(info[3])); - CloneWorker *worker = new CloneWorker(baton, callback); + new Nan::Callback(v8::Local::Cast(info[info.Length() - 1])); + CloneWorker *worker = new CloneWorker(baton, callback, cleanupHandles); - if (!info[0]->IsUndefined() && !info[0]->IsNull()) - worker->SaveToPersistent("url", info[0]->ToObject()); - if 
(!info[1]->IsUndefined() && !info[1]->IsNull()) - worker->SaveToPersistent("local_path", info[1]->ToObject()); - if (!info[2]->IsUndefined() && !info[2]->IsNull()) - worker->SaveToPersistent("options", info[2]->ToObject()); + worker->Reference("url", info[0]); + worker->Reference("local_path", info[1]); - AsyncLibgit2QueueWorker(worker); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitClone::CloneWorker::AcquireLocks() { + nodegit::LockMaster lockMaster( + true, + baton->url, + baton->local_path, + baton->options + ); + return lockMaster; +} + void GitClone::CloneWorker::Execute() { - giterr_clear(); + git_error_clear(); - { - LockMaster lockMaster( - /*asyncAction: */ true, baton->url, baton->local_path, baton->options); + git_repository *repo; + int result = + git_clone(&repo, baton->url, baton->local_path, baton->options); - git_repository *repo; - int result = - git_clone(&repo, baton->url, baton->local_path, baton->options); + if (result == GIT_OK) { + // This is required to clean up after the clone to avoid file locking + // issues in Windows and potentially other issues we don't know about. + git_repository_free(repo); - if (result == GIT_OK) { - // This is required to clean up after the clone to avoid file locking - // issues in Windows and potentially other issues we don't know about. - git_repository_free(repo); + // We want to provide a valid repository object, so reopen the repository + // after clone and cleanup. + result = git_repository_open(&baton->out, baton->local_path); + } - // We want to provide a valid repository object, so reopen the repository - // after clone and cleanup. 
- result = git_repository_open(&baton->out, baton->local_path); - } + baton->error_code = result; - baton->error_code = result; + if (result != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } +} - if (result != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); +void GitClone::CloneWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); } + + git_repository_free(baton->out); + + free((void*)baton->url); + free((void*)baton->local_path); + + delete baton; } void GitClone::CloneWorker::HandleOKCallback() { @@ -140,12 +163,12 @@ void GitClone::CloneWorker::HandleOKCallback() { if (baton->error) { v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method clone has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method clone has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Clone.clone").ToLocalChecked()); v8::Local argv[1] = {err}; callback->Call(1, argv, async_resource); @@ -153,51 +176,24 @@ void GitClone::CloneWorker::HandleOKCallback() { free((void *)baton->error->message); free((void *)baton->error); } else if (baton->error_code < 0) { - std::queue> workerArguments; - workerArguments.push(GetFromPersistent("url")); - workerArguments.push(GetFromPersistent("local_path")); - workerArguments.push(GetFromPersistent("options")); bool callbackFired = false; - while (!workerArguments.empty()) { - v8::Local node = workerArguments.front(); - 
workerArguments.pop(); - - if (!node->IsObject() || node->IsArray() || node->IsBooleanObject() || - node->IsDate() || node->IsFunction() || node->IsNumberObject() || - node->IsRegExp() || node->IsStringObject()) { - continue; - } - - v8::Local nodeObj = node->ToObject(); - v8::Local checkValue = GetPrivate( - nodeObj, Nan::New("NodeGitPromiseError").ToLocalChecked()); - - if (!checkValue.IsEmpty() && !checkValue->IsNull() && - !checkValue->IsUndefined()) { - v8::Local argv[1] = {checkValue->ToObject()}; + if (!callbackErrorHandle.IsEmpty()) { + v8::Local maybeError = Nan::New(callbackErrorHandle); + if (!maybeError->IsNull() && !maybeError->IsUndefined()) { + v8::Local argv[1] = { + maybeError + }; callback->Call(1, argv, async_resource); callbackFired = true; - break; - } - - v8::Local properties = nodeObj->GetPropertyNames(); - for (unsigned int propIndex = 0; propIndex < properties->Length(); - ++propIndex) { - v8::Local propName = - properties->Get(propIndex)->ToString(); - v8::Local nodeToQueue = nodeObj->Get(propName); - if (!nodeToQueue->IsUndefined()) { - workerArguments.push(nodeToQueue); - } } } if (!callbackFired) { v8::Local err = - Nan::Error("Method clone has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), + Nan::To(Nan::Error("Method clone has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Clone.clone").ToLocalChecked()); v8::Local argv[1] = {err}; callback->Call(1, argv, async_resource); @@ -207,5 +203,8 @@ void GitClone::CloneWorker::HandleOKCallback() { } } + free((void*)baton->url); + free((void*)baton->local_path); + delete baton; } diff --git a/generate/templates/manual/commit/extract_signature.cc b/generate/templates/manual/commit/extract_signature.cc index 911a31eef4..82a2141116 100644 --- 
a/generate/templates/manual/commit/extract_signature.cc +++ b/generate/templates/manual/commit/extract_signature.cc @@ -8,48 +8,42 @@ NAN_METHOD(GitCommit::ExtractSignature) return Nan::ThrowError("Oid commit_id is required."); } - if (info.Length() == 2 || (info.Length() == 3 && !info[2]->IsFunction())) { - return Nan::ThrowError("Callback is required and must be a Function."); + if (info.Length() >= 4 && !info[2]->IsString() && !info[2]->IsUndefined() && !info[2]->IsNull()) { + return Nan::ThrowError("String signature_field must be a string or undefined/null."); } - if (info.Length() >= 4) { - if (!info[2]->IsString() && !info[2]->IsUndefined() && !info[2]->IsNull()) { - return Nan::ThrowError("String signature_field must be a string or undefined/null."); - } - - if (!info[3]->IsFunction()) { - return Nan::ThrowError("Callback is required and must be a Function."); - } + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); } - ExtractSignatureBaton* baton = new ExtractSignatureBaton; + ExtractSignatureBaton* baton = new ExtractSignatureBaton(); baton->error_code = GIT_OK; baton->error = NULL; baton->signature = GIT_BUF_INIT_CONST(NULL, 0); baton->signed_data = GIT_BUF_INIT_CONST(NULL, 0); - baton->repo = Nan::ObjectWrap::Unwrap(info[0]->ToObject())->GetValue(); + baton->repo = Nan::ObjectWrap::Unwrap(Nan::To(info[0]).ToLocalChecked())->GetValue(); // baton->commit_id if (info[1]->IsString()) { - String::Utf8Value oidString(info[1]->ToString()); + Nan::Utf8String oidString(Nan::To(info[1]).ToLocalChecked()); baton->commit_id = (git_oid *)malloc(sizeof(git_oid)); if (git_oid_fromstr(baton->commit_id, (const char *)strdup(*oidString)) != GIT_OK) { free(baton->commit_id); - if (giterr_last()) { - return Nan::ThrowError(giterr_last()->message); + if (git_error_last()->klass != GIT_ERROR_NONE) { + return Nan::ThrowError(git_error_last()->message); } else { return Nan::ThrowError("Unknown Error"); } } } 
else { - baton->commit_id = Nan::ObjectWrap::Unwrap(info[1]->ToObject())->GetValue(); + baton->commit_id = Nan::ObjectWrap::Unwrap(Nan::To(info[1]).ToLocalChecked())->GetValue(); } // baton->field if (info[2]->IsString()) { - String::Utf8Value field(info[2]->ToString()); + Nan::Utf8String field(Nan::To(info[2]).ToLocalChecked()); baton->field = (char *)malloc(field.length() + 1); memcpy((void *)baton->field, *field, field.length()); baton->field[field.length()] = 0; @@ -57,42 +51,54 @@ NAN_METHOD(GitCommit::ExtractSignature) baton->field = NULL; } - Nan::Callback *callback; - if (info[2]->IsFunction()) { - callback = new Nan::Callback(Local::Cast(info[2])); - } else { - callback = new Nan::Callback(Local::Cast(info[3])); - } + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); - ExtractSignatureWorker *worker = new ExtractSignatureWorker(baton, callback); - worker->SaveToPersistent("repo", info[0]->ToObject()); - worker->SaveToPersistent("commit_id", info[1]->ToObject()); - Nan::AsyncQueueWorker(worker); + std::map> cleanupHandles; + ExtractSignatureWorker *worker = new ExtractSignatureWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info[0]); + worker->Reference("commit_id", info[1]); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitCommit::ExtractSignatureWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->repo); + return lockMaster; +} + void GitCommit::ExtractSignatureWorker::Execute() { - giterr_clear(); - - { - LockMaster lockMaster( - /*asyncAction: */true, - baton->repo - ); - - baton->error_code = git_commit_extract_signature( - &baton->signature, - &baton->signed_data, - baton->repo, - baton->commit_id, - (const char *)baton->field - ); + git_error_clear(); + + baton->error_code = git_commit_extract_signature( + &baton->signature, + &baton->signed_data, + baton->repo, + 
baton->commit_id, + (const char *)baton->field + ); + + if (baton->error_code != GIT_OK) { + baton->error = git_error_dup(git_error_last()); + } +} - if (baton->error_code != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); +void GitCommit::ExtractSignatureWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); } + + git_buf_dispose(&baton->signature); + git_buf_dispose(&baton->signed_data); + + free(baton->field); + + delete baton; } void GitCommit::ExtractSignatureWorker::HandleOKCallback() @@ -132,9 +138,9 @@ void GitCommit::ExtractSignatureWorker::HandleOKCallback() } else if (baton->error_code < 0) { - Local err = Nan::Error("Extract Signature has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Commit.extractSignature").ToLocalChecked()); + Local err = Nan::To(Nan::Error("Extract Signature has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Commit.extractSignature").ToLocalChecked()); Local argv[1] = { err }; @@ -145,8 +151,8 @@ void GitCommit::ExtractSignatureWorker::HandleOKCallback() callback->Call(0, NULL, async_resource); } - git_buf_free(&baton->signature); - git_buf_free(&baton->signed_data); + git_buf_dispose(&baton->signature); + git_buf_dispose(&baton->signed_data); if (baton->field != NULL) { free((void *)baton->field); diff --git a/generate/templates/manual/filter_list/load.cc b/generate/templates/manual/filter_list/load.cc index 1e7788e65f..22e2f1f4f3 100644 --- a/generate/templates/manual/filter_list/load.cc +++ b/generate/templates/manual/filter_list/load.cc @@ -35,11 +35,11 @@ NAN_METHOD(GitFilterList::Load) { return Nan::ThrowError("Number flags is 
required."); } - if (info.Length() == 5 || !info[5]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - LoadBaton *baton = new LoadBaton; + LoadBaton *baton = new LoadBaton(); baton->error_code = GIT_OK; baton->error = NULL; @@ -47,14 +47,14 @@ NAN_METHOD(GitFilterList::Load) { // start convert_from_v8 block git_repository *from_repo = NULL; from_repo = - Nan::ObjectWrap::Unwrap(info[0]->ToObject())->GetValue(); + Nan::ObjectWrap::Unwrap(Nan::To(info[0]).ToLocalChecked())->GetValue(); // end convert_from_v8 block baton->repo = from_repo; // start convert_from_v8 block git_blob *from_blob = NULL; if (info[1]->IsObject()) { from_blob = - Nan::ObjectWrap::Unwrap(info[1]->ToObject())->GetValue(); + Nan::ObjectWrap::Unwrap(Nan::To(info[1]).ToLocalChecked())->GetValue(); } else { from_blob = 0; } @@ -63,7 +63,7 @@ NAN_METHOD(GitFilterList::Load) { // start convert_from_v8 block const char *from_path = NULL; - String::Utf8Value path(info[2]->ToString()); + Nan::Utf8String path(Nan::To(info[2]).ToLocalChecked()); // malloc with one extra byte so we can add the terminating null character // C-strings expect: from_path = (const char *)malloc(path.length() + 1); @@ -89,40 +89,55 @@ NAN_METHOD(GitFilterList::Load) { baton->flags = from_flags; Nan::Callback *callback = - new Nan::Callback(v8::Local::Cast(info[5])); - LoadWorker *worker = new LoadWorker(baton, callback); - - if (!info[0]->IsUndefined() && !info[0]->IsNull()) - worker->SaveToPersistent("repo", info[0]->ToObject()); - if (!info[1]->IsUndefined() && !info[1]->IsNull()) - worker->SaveToPersistent("blob", info[1]->ToObject()); - if (!info[2]->IsUndefined() && !info[2]->IsNull()) - worker->SaveToPersistent("path", info[2]->ToObject()); - if (!info[3]->IsUndefined() && !info[3]->IsNull()) - worker->SaveToPersistent("mode", info[3]->ToObject()); - if (!info[4]->IsUndefined() && !info[4]->IsNull()) - worker->SaveToPersistent("flags", 
info[4]->ToObject()); - - AsyncLibgit2QueueWorker(worker); + new Nan::Callback(v8::Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + LoadWorker *worker = new LoadWorker(baton, callback, cleanupHandles); + + worker->Reference("repo", info[0]); + worker->Reference("blob", info[1]); + + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitFilterList::LoadWorker::AcquireLocks() { + nodegit::LockMaster lockMaster( + true, + baton->repo, + baton->blob, + baton->path + ); + return lockMaster; +} + void GitFilterList::LoadWorker::Execute() { - giterr_clear(); + git_error_clear(); - { - LockMaster lockMaster( - /*asyncAction: */ true, baton->repo, baton->blob, baton->path); + int result = git_filter_list_load(&baton->filters, baton->repo, baton->blob, + baton->path, baton->mode, baton->flags); - int result = git_filter_list_load(&baton->filters, baton->repo, baton->blob, - baton->path, baton->mode, baton->flags); + baton->error_code = result; - baton->error_code = result; + if (result != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } +} - if (result != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); +void GitFilterList::LoadWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); } + + git_filter_list_free(baton->filters); + + free((void *)baton->path); + + delete baton; } void GitFilterList::LoadWorker::HandleOKCallback() { @@ -133,32 +148,26 @@ void GitFilterList::LoadWorker::HandleOKCallback() { if (baton->filters != NULL) { // GitFilterList baton->filters v8::Local owners = Nan::New(0); - v8::Local filterRegistry = Nan::New(GitFilterRegistry::persistentHandle); - v8::Local propertyNames = filterRegistry->GetPropertyNames(); + nodegit::Context *nodegitContext = 
nodegit::Context::GetCurrentContext(); Nan::Set( owners, Nan::New(0), - this->GetFromPersistent("repo")->ToObject() + Nan::To(this->GetFromPersistent("repo")).ToLocalChecked() ); - for (uint32_t index = 0; index < propertyNames->Length(); ++index) { - v8::Local propertyName = propertyNames->Get(index)->ToString(); - String::Utf8Value propertyNameAsUtf8Value(propertyName); - const char *propertyNameAsCString = *propertyNameAsUtf8Value; - - bool isNotMethodOnRegistry = strcmp("register", propertyNameAsCString) - && strcmp("unregister", propertyNameAsCString); - if (isNotMethodOnRegistry && git_filter_list_contains(baton->filters, propertyNameAsCString)) { - Nan::Set( - owners, - Nan::New(owners->Length()), - filterRegistry->Get(propertyName) - ); + to = GitFilterList::New(baton->filters, true, Nan::To(owners).ToLocalChecked()); + auto filterListWrapper = Nan::ObjectWrap::Unwrap(to.As()); + auto filterRegistryCleanupHandles = static_pointer_cast(nodegit::Context::GetCurrentContext()->GetCleanupHandle("filterRegistry")); + std::for_each( + filterRegistryCleanupHandles->registeredFilters.begin(), + filterRegistryCleanupHandles->registeredFilters.end(), + [this, &filterListWrapper](std::pair> filterCleanupHandle) { + if (git_filter_list_contains(baton->filters, filterCleanupHandle.first.c_str())) { + filterListWrapper->SaveCleanupHandle(filterCleanupHandle.second); + } } - } - - to = GitFilterList::New(baton->filters, true, owners->ToObject()); + ); } else { to = Nan::Null(); } @@ -172,12 +181,12 @@ void GitFilterList::LoadWorker::HandleOKCallback() { if (baton->error) { v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method load has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method load has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - 
err->Set(Nan::New("errorFunction").ToLocalChecked(), + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterList.load").ToLocalChecked()); v8::Local argv[1] = {err}; callback->Call(1, argv, async_resource); @@ -185,53 +194,24 @@ void GitFilterList::LoadWorker::HandleOKCallback() { free((void *)baton->error->message); free((void *)baton->error); } else if (baton->error_code < 0) { - std::queue> workerArguments; - workerArguments.push(GetFromPersistent("repo")); - workerArguments.push(GetFromPersistent("blob")); - workerArguments.push(GetFromPersistent("path")); - workerArguments.push(GetFromPersistent("mode")); - workerArguments.push(GetFromPersistent("flags")); bool callbackFired = false; - while (!workerArguments.empty()) { - v8::Local node = workerArguments.front(); - workerArguments.pop(); - - if (!node->IsObject() || node->IsArray() || node->IsBooleanObject() || - node->IsDate() || node->IsFunction() || node->IsNumberObject() || - node->IsRegExp() || node->IsStringObject()) { - continue; - } - - v8::Local nodeObj = node->ToObject(); - v8::Local checkValue = GetPrivate( - nodeObj, Nan::New("NodeGitPromiseError").ToLocalChecked()); - - if (!checkValue.IsEmpty() && !checkValue->IsNull() && - !checkValue->IsUndefined()) { - v8::Local argv[1] = {checkValue->ToObject()}; + if (!callbackErrorHandle.IsEmpty()) { + v8::Local maybeError = Nan::New(callbackErrorHandle); + if (!maybeError->IsNull() && !maybeError->IsUndefined()) { + v8::Local argv[1] = { + maybeError + }; callback->Call(1, argv, async_resource); callbackFired = true; - break; - } - - v8::Local properties = nodeObj->GetPropertyNames(); - for (unsigned int propIndex = 0; propIndex < properties->Length(); - ++propIndex) { - v8::Local propName = - properties->Get(propIndex)->ToString(); - v8::Local nodeToQueue = nodeObj->Get(propName); - if (!nodeToQueue->IsUndefined()) { - workerArguments.push(nodeToQueue); 
- } } } if (!callbackFired) { v8::Local err = - Nan::Error("Method load has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), + Nan::To(Nan::Error("Method load has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterList.load").ToLocalChecked()); v8::Local argv[1] = {err}; callback->Call(1, argv, async_resource); @@ -241,5 +221,7 @@ void GitFilterList::LoadWorker::HandleOKCallback() { } } + free((void *)baton->path); + delete baton; } diff --git a/generate/templates/manual/filter_source/repo.cc b/generate/templates/manual/filter_source/repo.cc new file mode 100644 index 0000000000..78903d86b6 --- /dev/null +++ b/generate/templates/manual/filter_source/repo.cc @@ -0,0 +1,107 @@ +// NOTE you may need to occasionally rebuild this method by calling the generators +// if major changes are made to the templates / generator. + +// Due to some garbage collection issues related to submodules and git_filters, we need to clone the repository +// pointer before giving it to a user. 
+ +/* + * @param Repository callback + */ +NAN_METHOD(GitFilterSource::Repo) { + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + RepoBaton *baton = new RepoBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->src = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + + Nan::Callback *callback = new Nan::Callback(v8::Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + RepoWorker *worker = new RepoWorker(baton, callback, cleanupHandles); + + worker->Reference("src", info.This()); + + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster GitFilterSource::RepoWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->src); + return lockMaster; +} + +void GitFilterSource::RepoWorker::Execute() { + git_error_clear(); + + git_repository *repo = git_filter_source_repo(baton->src); + baton->error_code = git_repository_open(&repo, git_repository_path(repo)); + + if (baton->error_code == GIT_OK) { + baton->out = repo; + } else if (git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } +} + +void GitFilterSource::RepoWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + git_repository_free(baton->out); + + delete baton; +} + +void GitFilterSource::RepoWorker::HandleOKCallback() { + if (baton->error_code == GIT_OK) { + v8::Local to; + + if (baton->out != NULL) { + to = GitRepository::New(baton->out, true); + } else { + to = Nan::Null(); + } + + v8::Local argv[2] = {Nan::Null(), to}; + callback->Call(2, argv, async_resource); + } else { + if (baton->error) { + v8::Local err; + if (baton->error->message) { + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); + } else { + err = 
Nan::To(Nan::Error("Method repo has thrown an error.")).ToLocalChecked(); + } + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), + Nan::New("FilterSource.repo").ToLocalChecked()); + v8::Local argv[1] = {err}; + callback->Call(1, argv, async_resource); + if (baton->error->message) + free((void *)baton->error->message); + free((void *)baton->error); + } else if (baton->error_code < 0) { + v8::Local err = + Nan::To(Nan::Error("Method repo has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), + Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), + Nan::New("FilterSource.repo").ToLocalChecked()); + v8::Local argv[1] = {err}; + callback->Call(1, argv, async_resource); + } else { + callback->Call(0, NULL, async_resource); + } + } + + delete baton; +} diff --git a/generate/templates/manual/include/async_baton.h b/generate/templates/manual/include/async_baton.h index f8373cd0de..33da4a4520 100644 --- a/generate/templates/manual/include/async_baton.h +++ b/generate/templates/manual/include/async_baton.h @@ -1,97 +1,78 @@ #ifndef ASYNC_BATON #define ASYNC_BATON -#include +#include +#include +#include #include #include "lock_master.h" #include "nodegit.h" +#include "thread_pool.h" -// Base class for Batons used for callbacks (for example, -// JS functions passed as callback parameters, -// or field properties of configuration objects whose values are callbacks) -struct AsyncBaton { - uv_sem_t semaphore; - - virtual ~AsyncBaton() {} -}; - -void deleteBaton(AsyncBaton *baton); - -template -struct AsyncBatonWithResult : public AsyncBaton { - ResultT result; - ResultT defaultResult; // result returned if the callback doesn't return anything valid - void (*onCompletion)(AsyncBaton *); - - AsyncBatonWithResult(const ResultT &defaultResult) - : defaultResult(defaultResult) { - } - - void Done() { - if (onCompletion) { 
- onCompletion(this); - } else { - // signal completion - uv_sem_post(&semaphore); - } - } - - ResultT ExecuteAsync(ThreadPool::Callback asyncCallback, void (*onCompletion)(AsyncBaton *) = NULL) { - result = 0; - this->onCompletion = onCompletion; - if (!onCompletion) { - uv_sem_init(&semaphore, 0); - } - - { - LockMaster::TemporaryUnlock temporaryUnlock; - - libgit2ThreadPool.ExecuteReverseCallback(asyncCallback, this); - - if (!onCompletion) { - // wait for completion - uv_sem_wait(&semaphore); - uv_sem_destroy(&semaphore); +namespace nodegit { + // Base class for Batons used for callbacks (for example, + // JS functions passed as callback parameters, + // or field properties of configuration objects whose values are callbacks) + class AsyncBaton { + public: + typedef std::function AsyncCallback; + typedef std::function CompletionCallback; + + AsyncBaton(); + AsyncBaton(const AsyncBaton &) = delete; + AsyncBaton(AsyncBaton &&) = delete; + AsyncBaton &operator=(const AsyncBaton &) = delete; + AsyncBaton &operator=(AsyncBaton &&) = delete; + + virtual ~AsyncBaton() {} + + void Done(); + + Nan::AsyncResource *GetAsyncResource(); + + void SetCallbackError(v8::Local error); + + protected: + void ExecuteAsyncPerform(AsyncCallback asyncCallback, AsyncCallback asyncCancelCb, CompletionCallback onCompletion); + + private: + void SignalCompletion(); + void WaitForCompletion(); + + Nan::AsyncResource *asyncResource; + Nan::Global &callbackErrorHandle; + ThreadPool::Callback onCompletion; + std::unique_ptr completedMutex; + std::condition_variable completedCondition; + bool hasCompleted; + }; + + void deleteBaton(AsyncBaton *baton); + + template + class AsyncBatonWithResult : public AsyncBaton { + public: + ResultT defaultResult; // result returned if the callback doesn't return anything valid + ResultT result; + + AsyncBatonWithResult(const ResultT &defaultResult) + : defaultResult(defaultResult) { } - } - - return result; - } -}; - -struct AsyncBatonWithNoResult : public 
AsyncBaton { - void (*onCompletion)(AsyncBaton *); - - void Done() { - if (onCompletion) { - onCompletion(this); - } else { - // signal completion - uv_sem_post(&semaphore); - } - } - - void ExecuteAsync(ThreadPool::Callback asyncCallback, void (*onCompletion)(AsyncBaton *) = NULL) { - this->onCompletion = onCompletion; - if (!onCompletion) { - uv_sem_init(&semaphore, 0); - } - - { - LockMaster::TemporaryUnlock temporaryUnlock; - - libgit2ThreadPool.ExecuteReverseCallback(asyncCallback, this); - - if (!onCompletion) { - // wait for completion - uv_sem_wait(&semaphore); - uv_sem_destroy(&semaphore); + + ResultT ExecuteAsync(AsyncBaton::AsyncCallback asyncCallback, AsyncBaton::AsyncCallback asyncCancelCb, AsyncBaton::CompletionCallback onCompletion = nullptr) { + result = 0; + ExecuteAsyncPerform(asyncCallback, asyncCancelCb, onCompletion); + return result; } - } + }; - return; - } -}; + class AsyncBatonWithNoResult : public AsyncBaton { + public: + void ExecuteAsync(AsyncBaton::AsyncCallback asyncCallback, AsyncBaton::AsyncCallback asyncCancelCb, AsyncBaton::CompletionCallback onCompletion = nullptr) { + ExecuteAsyncPerform(asyncCallback, asyncCancelCb, onCompletion); + } + }; +} #endif diff --git a/generate/templates/manual/include/async_libgit2_queue_worker.h b/generate/templates/manual/include/async_libgit2_queue_worker.h deleted file mode 100644 index f3ddf2fb3f..0000000000 --- a/generate/templates/manual/include/async_libgit2_queue_worker.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef ASYNC_LIBGIT2_QUEUE_WORKER_H -#define ASYNC_LIBGIT2_QUEUE_WORKER_H - -#include -#include -#include "../include/thread_pool.h" -#include "../include/nodegit.h" - - -// Runs WorkComplete of the scheduled AsyncWorker, -// and destroys it. This is run in the uv_default_loop event loop. 
-NAN_INLINE void AsyncLibgit2Complete (void* data) { - Nan::AsyncWorker *worker = static_cast(data); - worker->WorkComplete(); - worker->Destroy(); -} - -// Runs Execute of the scheduled AyncWorker on the dedicated libgit2 thread / -// event loop, and schedules the WorkComplete callback to run on the -// uv_default_loop event loop -NAN_INLINE void AsyncLibgit2Execute (void *vworker) { - // execute the worker - Nan::AsyncWorker *worker = static_cast(vworker); - worker->Execute(); -} - -// Schedules the AsyncWorker to run on the dedicated libgit2 thread / event loop, -// and on completion AsyncLibgit2Complete on the default loop -NAN_INLINE void AsyncLibgit2QueueWorker (Nan::AsyncWorker* worker) { - libgit2ThreadPool.QueueWork(AsyncLibgit2Execute, AsyncLibgit2Complete, worker); -} - -#endif diff --git a/generate/templates/manual/include/async_worker.h b/generate/templates/manual/include/async_worker.h new file mode 100644 index 0000000000..b8f2909901 --- /dev/null +++ b/generate/templates/manual/include/async_worker.h @@ -0,0 +1,91 @@ +#ifndef NODEGIT_ASYNC_WORKER +#define NODEGIT_ASYNC_WORKER + +#include +#include +#include +#include +#include "lock_master.h" +#include "cleanup_handle.h" + +namespace nodegit { + class AsyncWorker : public Nan::AsyncWorker { + public: + AsyncWorker(Nan::Callback *callback, const char *resourceName, std::map> &cleanupHandles); + AsyncWorker(Nan::Callback *callback, const char *resourceName); + AsyncWorker(const AsyncWorker &) = delete; + AsyncWorker(AsyncWorker &&) = delete; + AsyncWorker &operator=(const AsyncWorker &) = delete; + AsyncWorker &operator=(AsyncWorker &&) = delete; + + // This must be implemented by every async worker + // so that the thread pool can lock separately + // from the execute method in the AsyncWorker + virtual nodegit::LockMaster AcquireLocks() = 0; + + // Ensure that the `HandleErrorCallback` will be called + // when the AsyncWork is complete + void Cancel(); + + // Retrieves the async resource attached to 
this AsyncWorker + // This is used to inform libgit2 callbacks what asyncResource + // they should use when working with any javascript + Nan::AsyncResource *GetAsyncResource(); + + Nan::Global *GetCallbackErrorHandle(); + + bool GetIsCancelled() const; + + void Destroy() override; + + void RegisterCleanupCall(std::function cleanupCall); + + template + void Reference(v8::Local item) { + if (item->IsFunction() || item->IsString() || item->IsNull() || item->IsUndefined()) { + return; + } + + auto objectWrapPointer = Nan::ObjectWrap::Unwrap(item.As()); + objectWrapPointer->Reference(); + RegisterCleanupCall([objectWrapPointer]() { + objectWrapPointer->Unreference(); + }); + } + + template + inline void Reference(const char *label, v8::Local item) { + SaveToPersistent(label, item); + Reference(item); + } + + template + inline void Reference(const char *label, v8::Local item) { + SaveToPersistent(label, item); + Reference(item); + } + + template + inline void Reference(const char *label, v8::Local array) { + SaveToPersistent(label, array); + for (uint32_t i = 0; i < array->Length(); ++i) { + Reference(Nan::Get(array, i).ToLocalChecked()); + } + } + + inline void Reference(const char *label, v8::Local item) { + SaveToPersistent(label, item); + } + + protected: + std::map> cleanupHandles; + Nan::Global callbackErrorHandle; + + private: + std::vector> cleanupCalls; + bool isCancelled = false; + + }; +} + +#endif diff --git a/generate/templates/manual/include/callback_wrapper.h b/generate/templates/manual/include/callback_wrapper.h index 0f655ed181..d50b976105 100644 --- a/generate/templates/manual/include/callback_wrapper.h +++ b/generate/templates/manual/include/callback_wrapper.h @@ -3,15 +3,16 @@ #include #include +#include using namespace v8; using namespace node; class CallbackWrapper { - Nan::Callback* jsCallback; + std::unique_ptr jsCallback; // throttling data, used for callbacks that need to be throttled - int throttle; // in milliseconds - if > 0, calls to the JS 
callback will be throttled + uint32_t throttle; // in milliseconds - if > 0, calls to the JS callback will be throttled uint64_t lastCallTime; // false will trigger the callback and not wait for the callback to finish @@ -20,29 +21,23 @@ class CallbackWrapper { bool waitForResult; public: - CallbackWrapper() { - jsCallback = NULL; - lastCallTime = 0; - throttle = 0; - } + CallbackWrapper(): jsCallback(nullptr), throttle(0), lastCallTime(0) {} - ~CallbackWrapper() { - SetCallback(NULL); - } + CallbackWrapper(const CallbackWrapper &) = delete; + CallbackWrapper(CallbackWrapper &&) = delete; + CallbackWrapper &operator=(const CallbackWrapper &) = delete; + CallbackWrapper &operator=(CallbackWrapper &&) = delete; bool HasCallback() { - return jsCallback != NULL; + return jsCallback != nullptr; } Nan::Callback* GetCallback() { - return jsCallback; + return jsCallback.get(); } - void SetCallback(Nan::Callback* callback, int throttle = 0, bool waitForResult = true) { - if(jsCallback) { - delete jsCallback; - } - jsCallback = callback; + void SetCallback(std::unique_ptr callback, uint32_t throttle = 0, bool waitForResult = true) { + jsCallback = std::move(callback); this->throttle = throttle; this->waitForResult = waitForResult; } diff --git a/generate/templates/manual/include/cleanup_handle.h b/generate/templates/manual/include/cleanup_handle.h new file mode 100644 index 0000000000..5eca8cf70d --- /dev/null +++ b/generate/templates/manual/include/cleanup_handle.h @@ -0,0 +1,21 @@ +#ifndef NODEGIT_CLEANUP_HANDLE_H +#define NODEGIT_CLEANUP_HANDLE_H + +#include +#include +#include + +namespace nodegit { + class CleanupHandle { + public: + CleanupHandle(); + virtual ~CleanupHandle(); + }; + + class FilterRegistryCleanupHandles : public CleanupHandle { + public: + std::map> registeredFilters; + }; +} + +#endif diff --git a/generate/templates/manual/include/configurable_class_wrapper.h b/generate/templates/manual/include/configurable_class_wrapper.h new file mode 100644 index 
0000000000..ff83a3176d --- /dev/null +++ b/generate/templates/manual/include/configurable_class_wrapper.h @@ -0,0 +1,59 @@ +#ifndef CALLER_CONFIGURABLE_CLASS_WRAPPER_H +#define CALLER_CONFIGURABLE_CLASS_WRAPPER_H + +#include +#include +#include + +#include "cleanup_handle.h" + +namespace nodegit { + class Context; + + template + class ConfigurableClassWrapper : public CleanupHandle { + public: + typedef typename Traits::cType cType; + typedef typename Traits::configurableCppClass configurableCppClass; + + struct v8ConversionResult { + v8ConversionResult(std::string _error) + : error(std::move(_error)), result(nullptr) + {} + + v8ConversionResult(std::shared_ptr _result) + : result(std::move(_result)) + {} + + std::string error; + std::shared_ptr result; + }; + + // We copy the entity + ConfigurableClassWrapper(nodegit::Context *_nodeGitContext) + : nodegitContext(_nodeGitContext), raw(nullptr) {} + + ConfigurableClassWrapper(const ConfigurableClassWrapper &) = delete; + ConfigurableClassWrapper(ConfigurableClassWrapper &&) = delete; + ConfigurableClassWrapper &operator=(const ConfigurableClassWrapper &) = delete; + ConfigurableClassWrapper &operator=(ConfigurableClassWrapper &&) = delete; + + virtual ~ConfigurableClassWrapper() { + if (raw != nullptr) { + free(raw); + raw = nullptr; + } + } + + const Context *nodegitContext = nullptr; + cType *GetValue() { + return raw; + } + + protected: + cType *raw; + std::vector> childCleanupVector; + }; +} + +#endif diff --git a/generate/templates/manual/include/context.h b/generate/templates/manual/include/context.h new file mode 100644 index 0000000000..11e8b93f8a --- /dev/null +++ b/generate/templates/manual/include/context.h @@ -0,0 +1,90 @@ +#ifndef NODEGIT_CONTEXT +#define NODEGIT_CONTEXT + +#include +#include +#include +#include +#include +#include + +#include "async_worker.h" +#include "cleanup_handle.h" +#include "thread_pool.h" +#include "tracker_wrap.h" + +namespace nodegit { + class AsyncContextCleanupHandle; + 
class Context { + public: + Context(v8::Isolate *isolate); + Context(const Context &) = delete; + Context(Context &&) = delete; + Context &operator=(const Context &) = delete; + Context &operator=(Context &&) = delete; + + ~Context(); + + static Context *GetCurrentContext(); + + v8::Local GetFromPersistent(std::string key); + + void QueueWorker(nodegit::AsyncWorker *worker); + + void SaveToPersistent(std::string key, const v8::Local &value); + + void SaveCleanupHandle(std::string key, std::shared_ptr cleanupHandle); + + std::shared_ptr GetCleanupHandle(std::string key); + + std::shared_ptr RemoveCleanupHandle(std::string key); + + void ShutdownThreadPool(std::unique_ptr cleanupHandle); + + inline void LinkTrackerList(nodegit::TrackerWrap::TrackerList *list) { + list->Link(&trackerList); + } + + inline int TrackerListSize() { + return nodegit::TrackerWrap::SizeFromList(&trackerList); + } + + private: + v8::Isolate *isolate; + + ThreadPool threadPool; + + // This map contains persistent handles that need to be cleaned up + // after the context has been torn down. + // Often this is used as a context-aware storage cell for `*::InitializeComponent` + // to store function templates on them. 
+ Nan::Global persistentStorage; + + std::map> cleanupHandles; + + nodegit::TrackerWrap::TrackerList trackerList; + + static std::map contexts; + }; + + class AsyncContextCleanupHandle { + public: + AsyncContextCleanupHandle(const AsyncContextCleanupHandle &) = delete; + AsyncContextCleanupHandle(AsyncContextCleanupHandle &&) = delete; + AsyncContextCleanupHandle &operator=(const AsyncContextCleanupHandle &) = delete; + AsyncContextCleanupHandle &operator=(AsyncContextCleanupHandle &&) = delete; + ~AsyncContextCleanupHandle(); + + private: + static void AsyncCleanupContext(void *data, void (*uvCallback)(void *), void *uvCallbackData); + + friend class Context; + AsyncContextCleanupHandle(v8::Isolate *isolate, Context *context); + Context *context; + node::AsyncCleanupHookHandle handle; + void (*doneCallback)(void *); + void *doneData; + }; +} + +#endif diff --git a/generate/templates/manual/include/convenient_hunk.h b/generate/templates/manual/include/convenient_hunk.h index 37e9ab1118..c45da7368f 100644 --- a/generate/templates/manual/include/convenient_hunk.h +++ b/generate/templates/manual/include/convenient_hunk.h @@ -5,6 +5,8 @@ #include #include "async_baton.h" +#include "async_worker.h" +#include "lock_master.h" #include "promise_completion.h" extern "C" { @@ -26,8 +28,7 @@ using namespace v8; class ConvenientHunk : public Nan::ObjectWrap { public: - static Nan::Persistent constructor_template; - static void InitializeComponent (v8::Local target); + static void InitializeComponent (v8::Local target, nodegit::Context *nodegitContext); static v8::Local New(void *raw); @@ -35,8 +36,15 @@ class ConvenientHunk : public Nan::ObjectWrap { char *GetHeader(); size_t GetSize(); + void Reference(); + void Unreference(); + private: ConvenientHunk(HunkData *hunk); + ConvenientHunk(const ConvenientHunk &) = delete; + ConvenientHunk(ConvenientHunk &&) = delete; + ConvenientHunk &operator=(const ConvenientHunk &) = delete; + ConvenientHunk &operator=(ConvenientHunk &&) = 
delete; ~ConvenientHunk(); HunkData *hunk; @@ -55,16 +63,22 @@ class ConvenientHunk : public Nan::ObjectWrap { HunkData *hunk; std::vector *lines; }; - class LinesWorker : public Nan::AsyncWorker { + class LinesWorker : public nodegit::AsyncWorker { public: LinesWorker( LinesBaton *_baton, Nan::Callback *callback - ) : Nan::AsyncWorker(callback) + ) : nodegit::AsyncWorker(callback, "nodegit:AsyncWorker:ConvenientHunk:Lines") , baton(_baton) {}; - ~LinesWorker() {}; + LinesWorker(const LinesWorker &) = delete; + LinesWorker(LinesWorker &&) = delete; + LinesWorker &operator=(const LinesWorker &) = delete; + LinesWorker &operator=(LinesWorker &&) = delete; + ~LinesWorker(){}; void Execute(); + void HandleErrorCallback(); void HandleOKCallback(); + nodegit::LockMaster AcquireLocks(); private: LinesBaton *baton; diff --git a/generate/templates/manual/include/convenient_patch.h b/generate/templates/manual/include/convenient_patch.h index 9d6921ef81..a894765694 100644 --- a/generate/templates/manual/include/convenient_patch.h +++ b/generate/templates/manual/include/convenient_patch.h @@ -5,6 +5,8 @@ #include #include "async_baton.h" +#include "async_worker.h" +#include "lock_master.h" #include "promise_completion.h" extern "C" { @@ -37,8 +39,12 @@ using namespace v8; class ConvenientPatch : public Nan::ObjectWrap { public: - static Nan::Persistent constructor_template; - static void InitializeComponent (v8::Local target); + ConvenientPatch(const ConvenientPatch &) = delete; + ConvenientPatch(ConvenientPatch &&) = delete; + ConvenientPatch &operator=(const ConvenientPatch &) = delete; + ConvenientPatch &operator=(ConvenientPatch &&) = delete; + + static void InitializeComponent(v8::Local target, nodegit::Context *nodegitContext); static v8::Local New(void *raw); @@ -49,6 +55,9 @@ class ConvenientPatch : public Nan::ObjectWrap { size_t GetNumHunks(); PatchData *GetValue(); + void Reference(); + void Unreference(); + private: ConvenientPatch(PatchData *raw); 
~ConvenientPatch(); @@ -67,16 +76,22 @@ class ConvenientPatch : public Nan::ObjectWrap { PatchData *patch; std::vector *hunks; }; - class HunksWorker : public Nan::AsyncWorker { + class HunksWorker : public nodegit::AsyncWorker { public: HunksWorker( HunksBaton *_baton, Nan::Callback *callback - ) : Nan::AsyncWorker(callback) + ) : nodegit::AsyncWorker(callback, "nodegit:AsyncWorker:ConvenientPatch:Hunks") , baton(_baton) {}; - ~HunksWorker() {}; + HunksWorker(const HunksWorker &) = delete; + HunksWorker(HunksWorker &&) = delete; + HunksWorker &operator=(const HunksWorker &) = delete; + HunksWorker &operator=(HunksWorker &&) = delete; + ~HunksWorker(){}; void Execute(); + void HandleErrorCallback(); void HandleOKCallback(); + nodegit::LockMaster AcquireLocks(); private: HunksBaton *baton; diff --git a/generate/templates/manual/include/filter_registry.h b/generate/templates/manual/include/filter_registry.h index b75938218b..ca98d972e4 100644 --- a/generate/templates/manual/include/filter_registry.h +++ b/generate/templates/manual/include/filter_registry.h @@ -2,10 +2,13 @@ #define GITFILTERREGISTRY_H #include #include -#include #include #include "async_baton.h" +#include "async_worker.h" +#include "cleanup_handle.h" +#include "context.h" +#include "lock_master.h" #include "nodegit_wrapper.h" #include "promise_completion.h" @@ -23,12 +26,10 @@ using namespace v8; class GitFilterRegistry : public Nan::ObjectWrap { public: - static void InitializeComponent(v8::Local target); - - static Nan::Persistent persistentHandle; + static void InitializeComponent(v8::Local target, nodegit::Context *nodegitContext); private: - + static NAN_METHOD(GitFilterRegister); static NAN_METHOD(GitFilterUnregister); @@ -47,25 +48,37 @@ class GitFilterRegistry : public Nan::ObjectWrap { int error_code; }; - class RegisterWorker : public Nan::AsyncWorker { + class RegisterWorker : public nodegit::AsyncWorker { public: - RegisterWorker(FilterRegisterBaton *_baton, Nan::Callback *callback) - : 
Nan::AsyncWorker(callback), baton(_baton) {}; + RegisterWorker(FilterRegisterBaton *_baton, Nan::Callback *callback, std::map> &cleanupHandles) + : nodegit::AsyncWorker(callback, "nodegit:AsyncWorker:FilterRegistry:Register", cleanupHandles), baton(_baton) {}; + RegisterWorker(const RegisterWorker &) = delete; + RegisterWorker(RegisterWorker &&) = delete; + RegisterWorker &operator=(const RegisterWorker &) = delete; + RegisterWorker &operator=(RegisterWorker &&) = delete; ~RegisterWorker() {}; void Execute(); + void HandleErrorCallback(); void HandleOKCallback(); + nodegit::LockMaster AcquireLocks(); private: FilterRegisterBaton *baton; }; - class UnregisterWorker : public Nan::AsyncWorker { + class UnregisterWorker : public nodegit::AsyncWorker { public: - UnregisterWorker(FilterUnregisterBaton *_baton, Nan::Callback *callback) - : Nan::AsyncWorker(callback), baton(_baton) {}; + UnregisterWorker(FilterUnregisterBaton *_baton, Nan::Callback *callback) + : nodegit::AsyncWorker(callback, "nodegit:AsyncWorker:FilterRegistry:Unregister"), baton(_baton) {}; + UnregisterWorker(const UnregisterWorker &) = delete; + UnregisterWorker(UnregisterWorker &&) = delete; + UnregisterWorker &operator=(const UnregisterWorker &) = delete; + UnregisterWorker &operator=(UnregisterWorker &&) = delete; ~UnregisterWorker() {}; void Execute(); + void HandleErrorCallback(); void HandleOKCallback(); + nodegit::LockMaster AcquireLocks(); private: FilterUnregisterBaton *baton; diff --git a/generate/templates/manual/include/lock_master.h b/generate/templates/manual/include/lock_master.h index fde38825b7..0cd313b60d 100644 --- a/generate/templates/manual/include/lock_master.h +++ b/generate/templates/manual/include/lock_master.h @@ -3,199 +3,180 @@ #include -class LockMasterImpl; - -class LockMaster { -public: - enum Status { - Disabled = 0, - EnabledForAsyncOnly, - Enabled - }; - -private: - static Status status; - - LockMasterImpl *impl; - - template - void AddLocks(const T *t) { - // by 
default, don't lock anything - } +namespace nodegit { + class LockMasterImpl; - // base case for variadic template unwinding - void AddParameters() { - } + class LockMaster { + private: + LockMasterImpl *impl; - // processes a single parameter, then calls recursively on the rest - template - void AddParameters(const T *t, const Types*... args) { - if(t) { - AddLocks(t); + template + void AddLocks(const T *t) { + // by default, don't lock anything } - AddParameters(args...); - } - void ConstructorImpl(); - void DestructorImpl(); - void ObjectToLock(const void *); - void ObjectsToLockAdded(); -public: - - // we lock on construction - template LockMaster(bool asyncAction, const Types*... types) { - if((status == Disabled) || ((status == EnabledForAsyncOnly) && !asyncAction)) { - impl = NULL; - return; + // base case for variadic template unwinding + void AddParameters() { } - ConstructorImpl(); - AddParameters(types...); - ObjectsToLockAdded(); - } - - // and unlock on destruction - ~LockMaster() { - if(!impl) { - return; + // processes a single parameter, then calls recursively on the rest + template + void AddParameters(const T *t, const Types*... args) { + if(t) { + AddLocks(t); + } + AddParameters(args...); } - DestructorImpl(); - } - - // TemporaryUnlock unlocks the LockMaster currently registered on the thread, - // and re-locks it on destruction. - class TemporaryUnlock { - LockMasterImpl *impl; void ConstructorImpl(); void DestructorImpl(); + void ObjectToLock(const void *); + void ObjectsToLockAdded(); public: - TemporaryUnlock() { - // We can't return here if disabled - // It's possible that a LockMaster was fully constructed and registered - // before the thread safety was disabled. - // So we rely on ConstructorImpl to abort if there is no registered LockMaster + + // we lock on construction + template LockMaster(bool asyncAction, const Types*... 
types) { + if(!asyncAction) { + impl = nullptr; + return; + } + ConstructorImpl(); + AddParameters(types...); + ObjectsToLockAdded(); } - ~TemporaryUnlock() { + + // we don't want this object to be copyable, there can only be one lock holder + LockMaster(const LockMaster &other) = delete; + + LockMaster &operator=(const LockMaster &other) = delete; + + // expose a move constructor so that LockMaster can be returned + LockMaster(LockMaster &&other); + + LockMaster &operator=(LockMaster &&other); + + // and unlock on destruction + ~LockMaster() { if(!impl) { return; } DestructorImpl(); } - }; - static void Initialize(); + // TemporaryUnlock unlocks the LockMaster currently registered on the thread, + // and re-locks it on destruction. + class TemporaryUnlock { + LockMasterImpl *impl; + + void ConstructorImpl(); + void DestructorImpl(); + public: + TemporaryUnlock() { + // We can't return here if disabled + // It's possible that a LockMaster was fully constructed and registered + // before the thread safety was disabled. 
+ // So we rely on ConstructorImpl to abort if there is no registered LockMaster + ConstructorImpl(); + } + TemporaryUnlock(const TemporaryUnlock &) = delete; + TemporaryUnlock(TemporaryUnlock &&) = delete; + TemporaryUnlock &operator=(const TemporaryUnlock &) = delete; + TemporaryUnlock &operator=(TemporaryUnlock &&) = delete; + ~TemporaryUnlock() { + if(!impl) { + return; + } + DestructorImpl(); + } + }; - // Enables the thread safety system - static void Enable() { - status = Enabled; - } + static void InitializeGlobal(); + static void InitializeContext(); + }; - static void SetStatus(Status status) { - LockMaster::status = status; - } - static void Disable() { - status = Disabled; + template<> inline void LockMaster::AddLocks(const git_repository *repo) { + // when using a repo, lock the repo + ObjectToLock(repo); } - static Status GetStatus() { - return status; + template<> inline void LockMaster::AddLocks(const git_index *index) { + // when using an index, lock the repo, or if there isn't one lock the index + const void *owner = git_index_owner(index); + if(!owner) { + owner = index; + } + ObjectToLock(owner); } - // Diagnostic information that can be provided to the JavaScript layer - // for a minimal level of testing - struct Diagnostics { - // this counts all stored mutexes - even if they are unlocked: - int storedMutexesCount; - }; - - static Diagnostics GetDiagnostics(); -}; - - -template<> inline void LockMaster::AddLocks(const git_repository *repo) { - // when using a repo, lock the repo - ObjectToLock(repo); -} - -template<> inline void LockMaster::AddLocks(const git_index *index) { - // when using an index, lock the repo, or if there isn't one lock the index - const void *owner = git_index_owner(index); - if(!owner) { - owner = index; + template<> inline void LockMaster::AddLocks(const git_commit *commit) { + // when using a commit, lock the repo + const void *owner = git_commit_owner(commit); + ObjectToLock(owner); } - ObjectToLock(owner); -} 
-template<> inline void LockMaster::AddLocks(const git_commit *commit) { - // when using a commit, lock the repo - const void *owner = git_commit_owner(commit); - ObjectToLock(owner); + // ... more locking rules would go here. According to an analysis of idefs.json, + // the following types are passed as non-const * and may require locking + // (some likely, some probably not): + // 'git_annotated_commit', + // 'git_blame_options', + // 'git_blob', + // 'git_buf', + // 'git_checkout_options', + // 'git_cherrypick_options', + // 'git_clone_options', + // 'git_commit', + // 'git_config', + // 'git_diff', + // 'git_diff_perfdata', + // 'git_error', + // 'git_fetch_options', + // 'git_fetch_options', + // 'git_filter', + // 'git_filter_list', + // 'git_hashsig', + // 'git_index', + // 'git_merge_file_input', + // 'git_merge_options', + // 'git_merge_options', + // 'git_note', + // 'git_note_iterator', + // 'git_object', + // 'git_odb', + // 'git_odb_object', + // 'git_oid', + // 'git_oidarray', + // 'git_packbuilder', + // 'git_patch', + // 'git_pathspec', + // 'git_push_options', + // 'git_rebase', + // 'git_rebase_options', + // 'git_refdb', + // 'git_reference', + // 'git_reflog', + // 'git_remote', + // 'git_remote_callbacks', + // 'git_remote_callbacks', + // 'git_repository', + // 'git_repository_init_options', + // 'git_revwalk', + // 'git_signature', + // 'git_stash_apply_options', + // 'git_status_list', + // 'git_strarray', + // 'git_submodule', + // 'git_submodule_update_options', + // 'git_tag', + // 'git_transfer_progress', + // 'git_transport', + // 'git_tree', + // 'git_treebuilder', + // 'git_writestream' + // + // Other types are always passed as const * and perhaps don't require locking + // (it's not a guarantee though) } -// ... more locking rules would go here. 
According to an analysis of idefs.json, -// the following types are passed as non-const * and may require locking -// (some likely, some probably not): -// 'git_annotated_commit', -// 'git_blame_options', -// 'git_blob', -// 'git_buf', -// 'git_checkout_options', -// 'git_cherrypick_options', -// 'git_clone_options', -// 'git_commit', -// 'git_config', -// 'git_diff', -// 'git_diff_perfdata', -// 'git_error', -// 'git_fetch_options', -// 'git_fetch_options', -// 'git_filter', -// 'git_filter_list', -// 'git_hashsig', -// 'git_index', -// 'git_merge_file_input', -// 'git_merge_options', -// 'git_merge_options', -// 'git_note', -// 'git_note_iterator', -// 'git_object', -// 'git_odb', -// 'git_odb_object', -// 'git_oid', -// 'git_oidarray', -// 'git_packbuilder', -// 'git_patch', -// 'git_pathspec', -// 'git_push_options', -// 'git_rebase', -// 'git_rebase_options', -// 'git_refdb', -// 'git_reference', -// 'git_reflog', -// 'git_remote', -// 'git_remote_callbacks', -// 'git_remote_callbacks', -// 'git_repository', -// 'git_repository_init_options', -// 'git_revwalk', -// 'git_signature', -// 'git_stash_apply_options', -// 'git_status_list', -// 'git_strarray', -// 'git_submodule', -// 'git_submodule_update_options', -// 'git_tag', -// 'git_transfer_progress', -// 'git_transport', -// 'git_tree', -// 'git_treebuilder', -// 'git_writestream' -// -// Other types are always passed as const * and perhaps don't require locking -// (it's not a guarantee though) - - #endif diff --git a/generate/templates/manual/include/nodegit.h b/generate/templates/manual/include/nodegit.h index a9cef2950a..bab3e4179b 100644 --- a/generate/templates/manual/include/nodegit.h +++ b/generate/templates/manual/include/nodegit.h @@ -1,10 +1,6 @@ #ifndef NODEGIT_H #define NODEGIT_H -#include "thread_pool.h" - -extern ThreadPool libgit2ThreadPool; - v8::Local GetPrivate(v8::Local object, v8::Local key); diff --git a/generate/templates/manual/include/nodegit_wrapper.h 
b/generate/templates/manual/include/nodegit_wrapper.h index c40b7af1db..c72f29027d 100644 --- a/generate/templates/manual/include/nodegit_wrapper.h +++ b/generate/templates/manual/include/nodegit_wrapper.h @@ -1,9 +1,12 @@ #ifndef NODEGIT_WRAPPER_H #define NODEGIT_WRAPPER_H -#include +#include #include +#include "tracker_wrap.h" +#include "cleanup_handle.h" + // the Traits template parameter supplies: // typename cppClass - the C++ type of the NodeGit wrapper (e.g. GitRepository) // typename cType - the C type of the libgit2 object being wrapped (e.g. git_repository) @@ -13,9 +16,16 @@ // // static const bool isFreeable // static void free(cType *raw) - frees the object using freeFunctionName +// +// nodegit::TrackerWrap allows for cheap tracking of new objects, avoiding searchs +// in a container to remove the tracking of a specific object. + +namespace nodegit { + class Context; +} template -class NodeGitWrapper : public Nan::ObjectWrap { +class NodeGitWrapper : public nodegit::TrackerWrap { public: // replicate Traits typedefs for ease of use typedef typename Traits::cType cType; @@ -29,25 +39,31 @@ class NodeGitWrapper : public Nan::ObjectWrap { // (and through a method) instead of changing selfFreeing, but that's // a separate issue. bool selfFreeing; + + nodegit::Context *nodegitContext = nullptr; + protected: cType *raw; + std::vector> childCleanupVector; // owner of the object, in the memory management sense. only populated // when using ownedByThis, and the type doesn't have a dupFunction // CopyablePersistentTraits are used to get the reset-on-destruct behavior. 
Nan::Persistent > owner; - static Nan::Persistent constructor_template; - // diagnostic count of self-freeing object instances - static int SelfFreeingInstanceCount; + thread_local static int SelfFreeingInstanceCount; // diagnostic count of constructed non-self-freeing object instances - static int NonSelfFreeingConstructedCount; + thread_local static int NonSelfFreeingConstructedCount; static void InitializeTemplate(v8::Local &tpl); NodeGitWrapper(cType *raw, bool selfFreeing, v8::Local owner); NodeGitWrapper(const char *error); // calls ThrowError + NodeGitWrapper(const NodeGitWrapper &) = delete; + NodeGitWrapper(NodeGitWrapper &&) = delete; + NodeGitWrapper &operator=(const NodeGitWrapper &) = delete; + NodeGitWrapper &operator=(NodeGitWrapper &&) = delete; ~NodeGitWrapper(); static NAN_METHOD(JSNewFunction); @@ -55,11 +71,24 @@ class NodeGitWrapper : public Nan::ObjectWrap { static NAN_METHOD(GetSelfFreeingInstanceCount); static NAN_METHOD(GetNonSelfFreeingConstructedCount); + void SetNativeOwners(v8::Local owners); + public: static v8::Local New(const cType *raw, bool selfFreeing, v8::Local owner = v8::Local()); + void SaveCleanupHandle(std::shared_ptr cleanupHandle); + + void Reference(); + void Unreference(); + + void AddReferenceCallbacks(size_t, std::function, std::function); + cType *GetValue(); void ClearValue(); + +private: + std::unordered_map> referenceCallbacks; + std::unordered_map> unreferenceCallbacks; }; #endif diff --git a/generate/templates/manual/include/promise_completion.h b/generate/templates/manual/include/promise_completion.h index 600fc06178..da933b7de3 100644 --- a/generate/templates/manual/include/promise_completion.h +++ b/generate/templates/manual/include/promise_completion.h @@ -4,6 +4,7 @@ #include #include "async_baton.h" +#include "context.h" // PromiseCompletion forwards either the resolved result or the rejection reason // to the native layer, once the promise completes @@ -14,33 +15,28 @@ class PromiseCompletion : public 
Nan::ObjectWrap { // callback type called when a promise completes - typedef void (*Callback) (bool isFulfilled, AsyncBaton *baton, v8::Local resultOfPromise); + typedef void (*Callback) (bool isFulfilled, nodegit::AsyncBaton *baton, v8::Local resultOfPromise); static NAN_METHOD(New); static NAN_METHOD(PromiseFulfilled); static NAN_METHOD(PromiseRejected); - // persistent handles for NAN_METHODs - static Nan::Persistent newFn; - static Nan::Persistent promiseFulfilled; - static Nan::Persistent promiseRejected; - - static v8::Local Bind(Nan::Persistent &method, v8::Local object); + static v8::Local Bind(v8::Local method, v8::Local object); static void CallCallback(bool isFulfilled, const Nan::FunctionCallbackInfo &info); // callback and baton stored for the promise that this PromiseCompletion is // attached to. when the promise completes, the callback will be called with // the result, and the stored baton. Callback callback; - AsyncBaton *baton; + nodegit::AsyncBaton *baton; - void Setup(v8::Local thenFn, v8::Local result, AsyncBaton *baton, Callback callback); + void Setup(v8::Local thenFn, v8::Local result, nodegit::AsyncBaton *baton, Callback callback); public: // If result is a promise, this will instantiate a new PromiseCompletion // and have it forward the promise result / reason via the baton and callback - static bool ForwardIfPromise(v8::Local result, AsyncBaton *baton, Callback callback); + static bool ForwardIfPromise(v8::Local result, nodegit::AsyncBaton *baton, Callback callback); - static void InitializeComponent(); + static void InitializeComponent(nodegit::Context *nodegitContext); }; #endif diff --git a/generate/templates/manual/include/str_array_converter.h b/generate/templates/manual/include/str_array_converter.h index 37f1bcc1dc..d5edd61872 100644 --- a/generate/templates/manual/include/str_array_converter.h +++ b/generate/templates/manual/include/str_array_converter.h @@ -12,10 +12,12 @@ class StrArrayConverter { public: static git_strarray 
*Convert (v8::Local val); + static void ConvertInto(git_strarray *out, v8::Local val); + static void ConvertInto(git_strarray *out, v8::Local val); private: - static git_strarray *ConvertArray(Array *val); - static git_strarray *ConvertString(v8::Local val); + static git_strarray *ConvertArray(v8::Local val); + static git_strarray *ConvertString(v8::Local val); static git_strarray *AllocStrArray(const size_t count); static git_strarray *ConstructStrArray(int argc, char** argv); }; diff --git a/generate/templates/manual/include/thread_pool.h b/generate/templates/manual/include/thread_pool.h index 8a346028df..4653e80795 100644 --- a/generate/templates/manual/include/thread_pool.h +++ b/generate/templates/manual/include/thread_pool.h @@ -1,63 +1,84 @@ #ifndef THREAD_POOL_H #define THREAD_POOL_H +#include +#include +#include #include -#include -class ThreadPool { -public: - typedef void (*Callback) (void *); +#include "async_worker.h" -private: - struct Work { - Callback workCallback; - Callback completionCallback; - void *data; +// Temporary workaround for LFS checkout. Comment added to be reverted. +// With the threadpool rewrite, a Worker will execute its callbacks with +// objects temporary unlock (to prevent deadlocks), and we'll wait until +// the callback is done to lock them back again (to make sure it's thread-safe). +// LFS checkout lost performance after this, and the proper way to fix it is +// to integrate nodegit-lfs into nodegit. Until this is implemented, a +// temporary workaround has been applied, which affects only Workers leveraging +// threaded libgit2 functions (at the moment only checkout) and does the +// following: +// - do not wait for the current callback to end, so that it can send the +// next callback to the main JS thread. +// - do not temporary unlock the objects, since they would be locked back +// again before the callback is executed. 
- Work(Callback workCallback, Callback completionCallback, void *data) - : workCallback(workCallback), completionCallback(completionCallback), data(data) { - } - }; +namespace nodegit { + class Context; + class AsyncContextCleanupHandle; + class ThreadPoolImpl; - struct LoopCallback { - Callback callback; - void *data; - bool isWork; + class ThreadPool { + public: + typedef std::function Callback; + typedef std::function QueueCallbackFn; + // Temporary workaround for LFS checkout. Code modified to be reverted. + // typedef std::function OnPostCallbackFn; + typedef std::function OnPostCallbackFn; - LoopCallback(Callback callback, void *data, bool isWork) - : callback(callback), data(data), isWork(isWork) { - } - }; + // Initializes thread pool and spins up the requested number of threads + // The provided loop will be used for completion callbacks, whenever + // queued work is completed + ThreadPool(int numberOfThreads, uv_loop_t *loop, nodegit::Context *context); + + ThreadPool(const ThreadPool &) = delete; + ThreadPool(ThreadPool &&) = delete; + ThreadPool &operator=(const ThreadPool &) = delete; + ThreadPool &operator=(ThreadPool &&) = delete; + + ~ThreadPool(); + + // Queues work on the thread pool, followed by completion call scheduled + // on the loop provided in the constructor. + // QueueWork should be called on the loop provided in the constructor. + void QueueWorker(nodegit::AsyncWorker *worker); + + // When an AsyncWorker is being executed, the threads involved in executing + // will ensure that this is set to the AsyncResource belonging to the AsyncWorker. + // This ensures that any callbacks from libgit2 take the correct AsyncResource + // when scheduling work on the JS thread. 
+ static Nan::AsyncResource *GetCurrentAsyncResource(); - // work to be performed on the threadpool - std::queue workQueue; - uv_mutex_t workMutex; - uv_sem_t workSemaphore; - int workInProgressCount; - - // completion and async callbacks to be performed on the loop - std::queue loopQueue; - uv_mutex_t loopMutex; - uv_async_t loopAsync; - - static void RunEventQueue(void *threadPool); - void RunEventQueue(); - static void RunLoopCallbacks(uv_async_t* handle); - void RunLoopCallbacks(); - - void QueueLoopCallback(Callback callback, void *data, bool isWork); - -public: - // Initializes thread pool and spins up the requested number of threads - // The provided loop will be used for completion callbacks, whenever - // queued work is completed - ThreadPool(int numberOfThreads, uv_loop_t *loop); - // Queues work on the thread pool, followed by completion call scheduled - // on the loop provided in the constructor. - // QueueWork should be called on the loop provided in the constructor. - void QueueWork(Callback workCallback, Callback completionCallback, void *data); - // Queues a callback on the loop provided in the constructor - void ExecuteReverseCallback(Callback reverseCallback, void *data); -}; + // Same as GetCurrentAsyncResource, except used to ensure callbacks occur + // in the correct context. 
+ static const nodegit::Context *GetCurrentContext(); + + // Same as GetCurrentAsyncResource, except used for callbacks to store errors + // for use after completion of async work + static Nan::Global *GetCurrentCallbackErrorHandle(); + + // Queues a callback on the loop provided in the constructor + static void PostCallbackEvent(OnPostCallbackFn onPostCallback); + + // Called once at libgit2 initialization to setup contracts with libgit2 + static void InitializeGlobal(); + + // Will asynchronously shutdown the thread pool + // It will also clean up any resources that the thread pool is keeping alive + void Shutdown(std::unique_ptr cleanupHandle); + + private: + std::unique_ptr impl; + }; +} #endif diff --git a/generate/templates/manual/include/tracker_wrap.h b/generate/templates/manual/include/tracker_wrap.h new file mode 100644 index 0000000000..3b15eba424 --- /dev/null +++ b/generate/templates/manual/include/tracker_wrap.h @@ -0,0 +1,76 @@ +#ifndef TRACKERWRAP_H +#define TRACKERWRAP_H + +#include +#include +#include + +namespace nodegit { + // Base class used to track wrapped objects, so that we can + // free the objects that were not freed at the time of context + // closing (because their WeakCallback didn't trigger. See + // https://github.com/nodejs/help/issues/3297). + // Implementation based on node.js's class RefTracker (napi). + class TrackerWrap : public Nan::ObjectWrap { + public: + TrackerWrap() = default; + virtual ~TrackerWrap() = default; + TrackerWrap(const TrackerWrap &other) = delete; + TrackerWrap(TrackerWrap &&other) = delete; + TrackerWrap& operator=(const TrackerWrap &other) = delete; + TrackerWrap& operator=(TrackerWrap &&other) = delete; + + // aliases: + // 'TrackerList': used in functionality related to a list. + // 'TrackerWrap' used in functionality not related to a list. 
+ using TrackerList = TrackerWrap; + + // Links 'this' right after 'listStart' + inline void Link(TrackerList* listStart) { + m_prev = listStart; + m_next = listStart->m_next; + if (m_next != nullptr) { + m_next->m_prev = this; + } + listStart->m_next = this; + } + + // Unlinks itself from the list it's linked to + inline TrackerWrap* Unlink() { + if (m_prev != nullptr) { + m_prev->m_next = m_next; + } + if (m_next != nullptr) { + m_next->m_prev = m_prev; + } + m_prev = nullptr; + m_next = nullptr; + return this; + } + + inline void SetTrackerWrapOwners(std::unique_ptr< std::vector > &&owners) { + m_owners = std::move(owners); + } + + inline const std::vector* GetTrackerWrapOwners() const { + return m_owners.get(); + } + + // Unlinks and returns the first item of 'listStart' + static TrackerWrap* UnlinkFirst(TrackerList *listStart); + + // Returns number of items following 'listStart' + static int SizeFromList(TrackerList *listStart); + + // Deletes items following 'listStart', but not 'listStart' itself + static void DeleteFromList(TrackerList *listStart); + + private: + TrackerList* m_next {}; + TrackerList* m_prev {}; + // m_owners will store pointers to native objects + std::unique_ptr< std::vector > m_owners {}; + }; +} + +#endif diff --git a/generate/templates/manual/include/v8_helpers.h b/generate/templates/manual/include/v8_helpers.h new file mode 100644 index 0000000000..184b8d31b1 --- /dev/null +++ b/generate/templates/manual/include/v8_helpers.h @@ -0,0 +1,10 @@ +#ifndef NODEGIT_V8_HELPERS_H +#define NODEGIT_V8_HELPERS_H + +#include + +namespace nodegit { + v8::Local safeGetField(v8::Local &containerObj, std::string fieldName); +} + +#endif diff --git a/generate/templates/manual/include/worker_pool.h b/generate/templates/manual/include/worker_pool.h new file mode 100644 index 0000000000..3380c87060 --- /dev/null +++ b/generate/templates/manual/include/worker_pool.h @@ -0,0 +1,180 @@ +#ifndef WORK_POOL_H +#define WORK_POOL_H + +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +/** + * \class WorkItem + * Abstract class for work items in the WorkerPool. + */ +class WorkItem +{ +public: + WorkItem() = default; + virtual ~WorkItem() = default; + WorkItem(const WorkItem &other) = default; + WorkItem(WorkItem &&other) = default; + WorkItem& operator=(const WorkItem &other) = default; + WorkItem& operator=(WorkItem &&other) = default; +}; + +/** + * \class Worker + * Interface for Workers in the WorkerPool. + */ +class IWorker +{ +public: + IWorker() = default; + virtual ~IWorker() = default; + IWorker(const IWorker &other) = delete; + IWorker(IWorker &&other) = delete; + IWorker& operator=(const IWorker &other) = delete; + IWorker& operator=(IWorker &&other) = delete; + + virtual bool Initialize() = 0; + virtual bool Execute(std::unique_ptr &&work) = 0; +}; + +/* Enumeration describing the Worker Pool Status: +* - kOk: everything ok. +* - kInitializeFailed: a worker thread failed when calling Initialize(). +* - kExecuteFailed: a worker thread failed when calling Execute(). +* - kShutdownEarly: InsertWork() was called but the worker pool was stopped. +*/ +enum class WPStatus {kOk, kInitializeFailed, kExecuteFailed, kShutdownEarly}; + +/** + * \class WorkerPool + * To leverage this class, a Worker must inherit from IWorker. + * WorkItem is an abstract class from which to inherit too. 
+ */ +template +class WorkerPool { +public: + WorkerPool(); + ~WorkerPool() = default; + WorkerPool(const WorkerPool &other) = delete; + WorkerPool(WorkerPool &&other) = delete; + WorkerPool& operator=(const WorkerPool &other) = delete; + WorkerPool& operator=(WorkerPool &&other) = delete; + + void Init(std::vector< std::shared_ptr > workers); + void InsertWork(std::unique_ptr &&work); + void Shutdown(); + WPStatus Status() { return m_atomicWPStatus; } + +private: + void DoWork(std::shared_ptr worker); + + std::mutex m_mutex {}; // locks m_workQueue and m_stop + std::condition_variable m_condition {}; + std::queue< std::unique_ptr > m_workQueue {}; + bool m_stop {true}; // initially the workpool has no worker threads + std::vector> m_threads {}; + std::atomic m_atomicWPStatus {WPStatus::kOk}; +}; + + +template +WorkerPool::WorkerPool() { + static_assert(std::is_base_of::value, "Worker must inherit from IWorker"); +} + +// launches the worker threads, if they hadn't been launched already +template +void WorkerPool::Init(std::vector< std::shared_ptr > workers) +{ + { + std::lock_guard lock(m_mutex); + if (!m_stop) + return; + m_stop = false; + } + + std::for_each (workers.begin(), workers.end(), [this](std::shared_ptr worker) { + m_threads.emplace_back(std::make_unique(std::bind(&WorkerPool::DoWork, this, worker))); + }); +} + +// queues work, or sets WPStatus::kShutdownEarly +template +void WorkerPool::InsertWork(std::unique_ptr &&work) +{ + { + std::lock_guard lock(m_mutex); + if (m_stop) { + m_atomicWPStatus = WPStatus::kShutdownEarly; + return; + } + m_workQueue.emplace(std::move(work)); + } + m_condition.notify_one(); +} + +template +void WorkerPool::Shutdown() +{ + { + std::lock_guard lock(m_mutex); + if (m_stop) { + return; + } + m_stop = true; + } + m_condition.notify_all(); + + std::for_each (m_threads.begin(), m_threads.end(), [](std::unique_ptr &wt) { + if (wt->joinable()) { + wt->join(); + } + }); +} + +template +void WorkerPool::DoWork(std::shared_ptr 
worker) +{ + if (!worker->Initialize()) { + m_atomicWPStatus = WPStatus::kInitializeFailed; + return; + } + + while (true) { + std::unique_ptr work {}; + { + std::unique_lock lock(m_mutex); + m_condition.wait(lock, [this] { + return this->m_stop || !this->m_workQueue.empty(); + }); + + // stop all workers if any of them failed on Initialize() or Execute() + // or the workerPool shutdown early + if (Status() != WPStatus::kOk) { + return; + } + + if (m_stop && m_workQueue.empty()) { + return; + } + + work = std::move(m_workQueue.front()); + m_workQueue.pop(); + } + + if (!worker->Execute(std::move(work))) { + m_atomicWPStatus = WPStatus::kExecuteFailed; + return; + } + } +} + +#endif // WORK_POOL_H + diff --git a/generate/templates/manual/include/wrapper.h b/generate/templates/manual/include/wrapper.h index 9dcbe31863..f24ce800ba 100644 --- a/generate/templates/manual/include/wrapper.h +++ b/generate/templates/manual/include/wrapper.h @@ -9,15 +9,14 @@ #include #include "nan.h" +#include "context.h" using namespace node; using namespace v8; class Wrapper : public Nan::ObjectWrap { public: - - static Nan::Persistent constructor_template; - static void InitializeComponent (v8::Local target); + static void InitializeComponent (v8::Local target, nodegit::Context *nodegitContext); void *GetValue(); static v8::Local New(const void *raw); diff --git a/generate/templates/manual/libgit2/opts.cc b/generate/templates/manual/libgit2/opts.cc new file mode 100644 index 0000000000..5829adc6cd --- /dev/null +++ b/generate/templates/manual/libgit2/opts.cc @@ -0,0 +1,117 @@ +NAN_METHOD(GitLibgit2::Opts) +{ + Nan::EscapableHandleScope scope; + + if (info.Length() == 0 || !info[0]->IsNumber()) { + return Nan::ThrowError("Number option is required."); + } + + const int from_option = (int)info[0].As()->Value(); + + git_error_clear(); + + v8::Local to = Nan::Undefined(); + switch (from_option) { + // GET size_t + case GIT_OPT_GET_MWINDOW_SIZE: + case GIT_OPT_GET_MWINDOW_MAPPED_LIMIT: + 
case GIT_OPT_GET_PACK_MAX_OBJECTS: { + size_t option_value; + if (git_libgit2_opts(from_option, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value); + break; + } + // GET int + case GIT_OPT_GET_OWNER_VALIDATION: { + int option_value; + if (git_libgit2_opts(from_option, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value); + break; + } + // GET unsigned long + case GIT_OPT_GET_WINDOWS_SHAREMODE: { + unsigned long option_value; + if (git_libgit2_opts(from_option, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value); + break; + } + // GET ssize_t + case GIT_OPT_GET_CACHED_MEMORY: { + ssize_t option_value; + if (git_libgit2_opts(from_option, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value); + break; + } + // GET git_buf + case GIT_OPT_GET_TEMPLATE_PATH: + case GIT_OPT_GET_USER_AGENT: { + git_buf option_value = { 0 }; + if (git_libgit2_opts(from_option, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value.ptr, option_value.size) + .ToLocalChecked(); + git_buf_dispose(&option_value); + break; + } + case GIT_OPT_GET_SEARCH_PATH: { + git_buf option_value = { 0 }; + if (info.Length() < 2 || !info[1]->IsNumber()) { + return Nan::ThrowError("Number option is required."); + } + const int level = (int)info[1].As()->Value(); + if (git_libgit2_opts(from_option, level, &option_value)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + to = Nan::New(option_value.ptr, option_value.size) + .ToLocalChecked(); + git_buf_dispose(&option_value); + break; + } + // SET int + case GIT_OPT_ENABLE_CACHING: + case GIT_OPT_ENABLE_STRICT_OBJECT_CREATION: + case GIT_OPT_ENABLE_STRICT_SYMBOLIC_REF_CREATION: + case GIT_OPT_ENABLE_OFS_DELTA: + case GIT_OPT_ENABLE_FSYNC_GITDIR: + case GIT_OPT_ENABLE_STRICT_HASH_VERIFICATION: + case 
GIT_OPT_ENABLE_UNSAVED_INDEX_SAFETY: + case GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS: + case GIT_OPT_SET_OWNER_VALIDATION: { + if (info.Length() < 2 || !info[1]->IsNumber()) { + return Nan::ThrowError("Number option is required."); + } + const int option_arg = (int)info[1].As()->Value(); + if (git_libgit2_opts(from_option, option_arg)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + break; + } + // SET size_t + case GIT_OPT_SET_MWINDOW_SIZE: + case GIT_OPT_SET_MWINDOW_MAPPED_LIMIT: + case GIT_OPT_SET_PACK_MAX_OBJECTS: { + if (info.Length() < 2 || !info[1]->IsNumber()) { + return Nan::ThrowError("Number option is required."); + } + const size_t option_arg = (size_t)info[1].As()->Value(); + if (git_libgit2_opts(from_option, option_arg)) { + return Nan::ThrowError("git_libgit2_opts failed"); + } + break; + } + default: { + return Nan::ThrowError("Unsupported option"); + } + } + + return info.GetReturnValue().Set(scope.Escape(to)); +} diff --git a/generate/templates/manual/patches/convenient_patches.cc b/generate/templates/manual/patches/convenient_patches.cc index c7facba4ed..8873fe07c8 100644 --- a/generate/templates/manual/patches/convenient_patches.cc +++ b/generate/templates/manual/patches/convenient_patches.cc @@ -3,36 +3,89 @@ NAN_METHOD(GitPatch::ConvenientFromDiff) { return Nan::ThrowError("Diff diff is required."); } - if (info.Length() == 1 || !info[1]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - ConvenientFromDiffBaton *baton = new ConvenientFromDiffBaton; + ConvenientFromDiffBaton *baton = new ConvenientFromDiffBaton(); baton->error_code = GIT_OK; baton->error = NULL; - baton->diff = Nan::ObjectWrap::Unwrap(info[0]->ToObject())->GetValue(); + baton->diff = Nan::ObjectWrap::Unwrap(Nan::To(info[0]).ToLocalChecked())->GetValue(); + + if (info[1]->IsArray()) { + v8::Local context = Nan::GetCurrentContext(); + const v8::Local indexesArray = info[1].As(); + 
const uint32_t numIndexes = indexesArray->Length(); + + for (uint32_t i = 0; i < numIndexes; ++i) { + v8::Local value = indexesArray->Get(context, i).ToLocalChecked(); + int idx = value.As()->Value(); + baton->indexes.push_back(idx); + } + } + baton->out = new std::vector; baton->out->reserve(git_diff_num_deltas(baton->diff)); - Nan::Callback *callback = new Nan::Callback(Local::Cast(info[1])); - ConvenientFromDiffWorker *worker = new ConvenientFromDiffWorker(baton, callback); + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + ConvenientFromDiffWorker *worker = new ConvenientFromDiffWorker(baton, callback, cleanupHandles); - worker->SaveToPersistent("diff", info[0]); + worker->Reference("diff", info[0]); - Nan::AsyncQueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitPatch::ConvenientFromDiffWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->diff); + return lockMaster; +} + void GitPatch::ConvenientFromDiffWorker::Execute() { - giterr_clear(); + git_error_clear(); - { - LockMaster lockMaster(true, baton->diff); - std::vector patchesToBeFreed; + std::vector patchesToBeFreed; + + if (baton->indexes.size() > 0) { + for (int idx : baton->indexes) { + git_patch *nextPatch; + int result = git_patch_from_diff(&nextPatch, baton->diff, idx); + + if (result) { + while (!patchesToBeFreed.empty()) + { + git_patch_free(patchesToBeFreed.back()); + patchesToBeFreed.pop_back(); + } + + while (!baton->out->empty()) { + PatchDataFree(baton->out->back()); + baton->out->pop_back(); + } + + baton->error_code = result; + + if (git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } - for (int i = 0; i < git_diff_num_deltas(baton->diff); ++i) { + delete baton->out; + baton->out = NULL; + + return; + } + + if (nextPatch != NULL) { + 
baton->out->push_back(createFromRaw(nextPatch)); + patchesToBeFreed.push_back(nextPatch); + } + } + } else { + for (std::size_t i = 0; i < git_diff_num_deltas(baton->diff); ++i) { git_patch *nextPatch; int result = git_patch_from_diff(&nextPatch, baton->diff, i); @@ -50,8 +103,8 @@ void GitPatch::ConvenientFromDiffWorker::Execute() { baton->error_code = result; - if (giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); + if (git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); } delete baton->out; @@ -65,13 +118,32 @@ void GitPatch::ConvenientFromDiffWorker::Execute() { patchesToBeFreed.push_back(nextPatch); } } + } - while (!patchesToBeFreed.empty()) - { - git_patch_free(patchesToBeFreed.back()); - patchesToBeFreed.pop_back(); + while (!patchesToBeFreed.empty()) + { + git_patch_free(patchesToBeFreed.back()); + patchesToBeFreed.pop_back(); + } +} + +void GitPatch::ConvenientFromDiffWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); + } + + while (!baton->out->empty()) { + PatchDataFree(baton->out->back()); + baton->out->pop_back(); } + + delete baton->out; + + delete baton; } void GitPatch::ConvenientFromDiffWorker::HandleOKCallback() { @@ -97,12 +169,12 @@ void GitPatch::ConvenientFromDiffWorker::HandleOKCallback() { if (baton->error) { Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method convenientFromDiff has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method convenientFromDiff has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Patch.convenientFromDiff").ToLocalChecked()); + Nan::Set(err, 
Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Patch.convenientFromDiff").ToLocalChecked()); Local argv[1] = { err }; @@ -118,9 +190,9 @@ void GitPatch::ConvenientFromDiffWorker::HandleOKCallback() { } if (baton->error_code < 0) { - Local err = Nan::Error("method convenientFromDiff has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Patch.convenientFromDiff").ToLocalChecked()); + Local err = Nan::To(Nan::Error("method convenientFromDiff has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Patch.convenientFromDiff").ToLocalChecked()); Local argv[1] = { err }; @@ -129,5 +201,5 @@ void GitPatch::ConvenientFromDiffWorker::HandleOKCallback() { return; } - callback->Call(0, NULL); + Nan::Call(*callback, 0, NULL); } diff --git a/generate/templates/manual/remote/ls.cc b/generate/templates/manual/remote/ls.cc index 605447b109..97c801c629 100644 --- a/generate/templates/manual/remote/ls.cc +++ b/generate/templates/manual/remote/ls.cc @@ -1,55 +1,69 @@ NAN_METHOD(GitRemote::ReferenceList) { - if (info.Length() == 0 || !info[0]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - ReferenceListBaton* baton = new ReferenceListBaton; + ReferenceListBaton* baton = new ReferenceListBaton(); baton->error_code = GIT_OK; baton->error = NULL; baton->out = new std::vector; baton->remote = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); - Nan::Callback *callback = new Nan::Callback(Local::Cast(info[0])); - ReferenceListWorker *worker = new ReferenceListWorker(baton, callback); - worker->SaveToPersistent("remote", info.This()); - 
Nan::AsyncQueueWorker(worker); + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + ReferenceListWorker *worker = new ReferenceListWorker(baton, callback, cleanupHandles); + worker->Reference("remote", info.This()); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitRemote::ReferenceListWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->remote); + return lockMaster; +} + void GitRemote::ReferenceListWorker::Execute() { - giterr_clear(); + git_error_clear(); - { - LockMaster lockMaster( - /*asyncAction: */true, - baton->remote - ); - - const git_remote_head **remote_heads; - size_t num_remote_heads; - baton->error_code = git_remote_ls( - &remote_heads, - &num_remote_heads, - baton->remote - ); - - if (baton->error_code != GIT_OK) { - baton->error = git_error_dup(giterr_last()); - delete baton->out; - baton->out = NULL; - return; - } + const git_remote_head **remote_heads; + size_t num_remote_heads; + baton->error_code = git_remote_ls( + &remote_heads, + &num_remote_heads, + baton->remote + ); + + if (baton->error_code != GIT_OK) { + baton->error = git_error_dup(git_error_last()); + delete baton->out; + baton->out = NULL; + return; + } + + baton->out->reserve(num_remote_heads); - baton->out->reserve(num_remote_heads); + for (size_t head_index = 0; head_index < num_remote_heads; ++head_index) { + git_remote_head *remote_head = git_remote_head_dup(remote_heads[head_index]); + baton->out->push_back(remote_head); + } +} - for (size_t head_index = 0; head_index < num_remote_heads; ++head_index) { - git_remote_head *remote_head = git_remote_head_dup(remote_heads[head_index]); - baton->out->push_back(remote_head); +void GitRemote::ReferenceListWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void 
*)baton->error); } + + delete baton->out; + + delete baton; } void GitRemote::ReferenceListWorker::HandleOKCallback() @@ -85,9 +99,9 @@ void GitRemote::ReferenceListWorker::HandleOKCallback() } else if (baton->error_code < 0) { - Local err = Nan::Error("Reference List has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Remote.referenceList").ToLocalChecked()); + Local err = Nan::To(Nan::Error("Reference List has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Remote.referenceList").ToLocalChecked()); Local argv[1] = { err }; @@ -97,4 +111,6 @@ void GitRemote::ReferenceListWorker::HandleOKCallback() { callback->Call(0, NULL, async_resource); } + + delete baton; } diff --git a/generate/templates/manual/repository/get_references.cc b/generate/templates/manual/repository/get_references.cc new file mode 100644 index 0000000000..56bc12ac34 --- /dev/null +++ b/generate/templates/manual/repository/get_references.cc @@ -0,0 +1,162 @@ +NAN_METHOD(GitRepository::GetReferences) +{ + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + GetReferencesBaton* baton = new GetReferencesBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->out = new std::vector; + baton->repo = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + GetReferencesWorker *worker = new GetReferencesWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info.This()); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster 
GitRepository::GetReferencesWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->repo); + return lockMaster; +} + +void GitRepository::GetReferencesWorker::Execute() +{ + giterr_clear(); + + git_repository *repo = baton->repo; + + git_strarray reference_names; + baton->error_code = git_reference_list(&reference_names, repo); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + delete baton->out; + baton->out = NULL; + return; + } + + for (size_t reference_index = 0; reference_index < reference_names.count; ++reference_index) { + git_reference *reference; + baton->error_code = git_reference_lookup(&reference, repo, reference_names.strings[reference_index]); + + // stop execution and return if there is an error + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + // unwind and return + while (baton->out->size()) { + git_reference *referenceToFree = baton->out->back(); + baton->out->pop_back(); + git_reference_free(referenceToFree); + } + + git_strarray_free(&reference_names); + delete baton->out; + baton->out = NULL; + return; + } + + if (git_reference_type(reference) == GIT_REF_SYMBOLIC) { + git_reference *resolved_reference; + int resolve_result = git_reference_resolve(&resolved_reference, reference); + git_reference_free(reference); + + // if we can't resolve the ref, then just ignore it + if (resolve_result == GIT_OK) { + baton->out->push_back(resolved_reference); + } + } else { + baton->out->push_back(reference); + } + } + + git_strarray_free(&reference_names); +} + +void GitRepository::GetReferencesWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + while (baton->out->size()) { + git_reference *referenceToFree = baton->out->back(); + baton->out->pop_back(); + 
git_reference_free(referenceToFree); + } + + delete baton->out; + + delete baton; +} + +void GitRepository::GetReferencesWorker::HandleOKCallback() +{ + if (baton->out != NULL) + { + unsigned int size = baton->out->size(); + Local result = Nan::New(size); + for (unsigned int i = 0; i < size; i++) { + git_reference *reference = baton->out->at(i); + Nan::Set( + result, + Nan::New(i), + GitRefs::New( + reference, + true, + Nan::To(GitRepository::New(git_reference_owner(reference), true)).ToLocalChecked() + ) + ); + } + + delete baton->out; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } + else if (baton->error) + { + Local argv[1] = { + Nan::Error(baton->error->message) + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) + { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + Local err = Nan::To(Nan::Error("Repository getReferences has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Repository.getReferences").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + else + { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} diff --git a/generate/templates/manual/repository/get_remotes.cc b/generate/templates/manual/repository/get_remotes.cc new file mode 100644 index 0000000000..a7c316bb0e --- /dev/null +++ b/generate/templates/manual/repository/get_remotes.cc @@ -0,0 +1,161 @@ +NAN_METHOD(GitRepository::GetRemotes) +{ + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + GetRemotesBaton* baton = new GetRemotesBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->out = new std::vector; + baton->repo = 
Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + GetRemotesWorker *worker = new GetRemotesWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info.This()); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster GitRepository::GetRemotesWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true); + return lockMaster; +} + +void GitRepository::GetRemotesWorker::Execute() +{ + giterr_clear(); + + git_repository *repo = baton->repo; + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + delete baton->out; + baton->out = NULL; + return; + } + + git_strarray remote_names; + baton->error_code = git_remote_list(&remote_names, repo); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + delete baton->out; + baton->out = NULL; + return; + } + + for (size_t remote_index = 0; remote_index < remote_names.count; ++remote_index) { + git_remote *remote; + baton->error_code = git_remote_lookup(&remote, repo, remote_names.strings[remote_index]); + + // stop execution and return if there is an error + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + // unwind and return + while (baton->out->size()) { + git_remote *remoteToFree = baton->out->back(); + baton->out->pop_back(); + git_remote_free(remoteToFree); + } + + git_strarray_free(&remote_names); + delete baton->out; + baton->out = NULL; + return; + } + + baton->out->push_back(remote); + } + + git_strarray_free(&remote_names); +} + +void GitRepository::GetRemotesWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + 
+ free((void *)baton->error); + } + + while (baton->out->size()) { + git_remote *remoteToFree = baton->out->back(); + baton->out->pop_back(); + git_remote_free(remoteToFree); + } + + delete baton->out; + + delete baton; +} + +void GitRepository::GetRemotesWorker::HandleOKCallback() +{ + if (baton->out != NULL) + { + unsigned int size = baton->out->size(); + Local result = Nan::New(size); + for (unsigned int i = 0; i < size; i++) { + git_remote *remote = baton->out->at(i); + Nan::Set( + result, + Nan::New(i), + GitRemote::New( + remote, + true, + Nan::To(GitRepository::New(git_remote_owner(remote), true)).ToLocalChecked() + ) + ); + } + + delete baton->out; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } + else if (baton->error) + { + Local argv[1] = { + Nan::Error(baton->error->message) + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) + { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + Local err = Nan::To(Nan::Error("Repository refreshRemotes has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Repository.refreshRemotes").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + else + { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} diff --git a/generate/templates/manual/repository/get_submodules.cc b/generate/templates/manual/repository/get_submodules.cc new file mode 100644 index 0000000000..069f6bdbc8 --- /dev/null +++ b/generate/templates/manual/repository/get_submodules.cc @@ -0,0 +1,141 @@ +NAN_METHOD(GitRepository::GetSubmodules) +{ + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + GetSubmodulesBaton* baton = new 
GetSubmodulesBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->out = new std::vector; + baton->repo = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + GetSubmodulesWorker *worker = new GetSubmodulesWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info.This()); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +struct submodule_foreach_payload { + git_repository *repo; + std::vector *out; +}; + +int foreachSubmoduleCB(git_submodule *submodule, const char *name, void *void_payload) { + submodule_foreach_payload *payload = (submodule_foreach_payload *)void_payload; + git_submodule *out; + + int result = git_submodule_lookup(&out, payload->repo, name); + if (result == GIT_OK) { + payload->out->push_back(out); + } + + return result; +} + +nodegit::LockMaster GitRepository::GetSubmodulesWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->repo); + return lockMaster; +} + +void GitRepository::GetSubmodulesWorker::Execute() +{ + giterr_clear(); + + submodule_foreach_payload payload { baton->repo, baton->out }; + baton->error_code = git_submodule_foreach(baton->repo, foreachSubmoduleCB, (void *)&payload); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + while (baton->out->size()) { + git_submodule_free(baton->out->back()); + baton->out->pop_back(); + } + delete baton->out; + baton->out = NULL; + } +} + +void GitRepository::GetSubmodulesWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + while (baton->out->size()) { + git_submodule_free(baton->out->back()); + baton->out->pop_back(); + } + + delete baton->out; + + delete baton; 
+} + +void GitRepository::GetSubmodulesWorker::HandleOKCallback() +{ + if (baton->out != NULL) + { + unsigned int size = baton->out->size(); + Local result = Nan::New(size); + for (unsigned int i = 0; i < size; i++) { + git_submodule *submodule = baton->out->at(i); + Nan::Set( + result, + Nan::New(i), + GitSubmodule::New( + submodule, + true, + Nan::To(GitRepository::New(git_submodule_owner(submodule), true)).ToLocalChecked() + ) + ); + } + + delete baton->out; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } + else if (baton->error) + { + Local argv[1] = { + Nan::Error(baton->error->message) + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) + { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + Local err = Nan::To(Nan::Error("Repository getSubmodules has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Repository.getSubmodules").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + else + { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} diff --git a/generate/templates/manual/repository/refresh_references.cc b/generate/templates/manual/repository/refresh_references.cc new file mode 100644 index 0000000000..5194f1c48c --- /dev/null +++ b/generate/templates/manual/repository/refresh_references.cc @@ -0,0 +1,712 @@ +int getOidOfReferenceCommit(git_oid *commitOid, git_reference *ref) { + git_object *commitObject; + int result = git_reference_peel(&commitObject, ref, GIT_OBJ_COMMIT); + + if (result != GIT_OK) { + return result; + } + + git_oid_cpy(commitOid, git_object_id(commitObject)); + git_object_free(commitObject); + return result; +} + +int asDirectReference(git_reference **out, git_reference *ref) { + if 
(git_reference_type(ref) != GIT_REF_SYMBOLIC) { + return git_reference_dup(out, ref); + } + + int error = git_reference_resolve(out, ref); + if (error != GIT_OK) { + *out = NULL; + } + + return GIT_OK; +} + +int lookupDirectReferenceByShorthand(git_reference **out, git_repository *repo, const char *shorthand) { + git_reference *ref = NULL; + int result = git_reference_dwim(&ref, repo, shorthand); + + if (result != GIT_OK) { + return result; + } + + result = asDirectReference(out, ref); + git_reference_free(ref); + return result; +} + +int lookupDirectReferenceByFullName(git_reference **out, git_repository *repo, const char *fullName) { + git_reference *ref = NULL; + int result = git_reference_lookup(&ref, repo, fullName); + + if (result != GIT_OK) { + return result; + } + + result = asDirectReference(out, ref); + git_reference_free(ref); + return result; +} + +char *getRemoteNameOfReference(git_reference *remoteReference) { + return strtok(strdup(git_reference_shorthand(remoteReference)), "/"); +} + +bool gitStrArrayContains(git_strarray *strarray, const char *string) { + for (size_t i = 0; i < strarray->count; ++i) { + if (strcmp(strarray->strings[i], string) == 0) { + return true; + } + } + return false; +} + +class RefreshedRefModel { +public: + RefreshedRefModel(git_reference *ref): + fullName(strdup(git_reference_name(ref))), + message(NULL), + sha(new char[GIT_OID_HEXSZ + 1]), + shorthand(strdup(git_reference_shorthand(ref))), + tagOdbBuffer(NULL), + tagOdbBufferLength(0), + type(NULL) + { + if (git_reference_is_branch(ref)) { + type = "branch"; + } else if (git_reference_is_remote(ref)) { + type = "remote"; + } else { + type = "tag"; + } + } + + RefreshedRefModel(const RefreshedRefModel &) = delete; + RefreshedRefModel(RefreshedRefModel &&) = delete; + RefreshedRefModel &operator=(const RefreshedRefModel &) = delete; + RefreshedRefModel &operator=(RefreshedRefModel &&) = delete; + + static int fromReference(RefreshedRefModel **out, git_reference *ref, 
git_odb *odb) { + RefreshedRefModel *refModel = new RefreshedRefModel(ref); + const git_oid *referencedTargetOid = git_reference_target(ref); + + if (!git_reference_is_tag(ref)) { + git_oid_tostr(refModel->sha, GIT_OID_HEXSZ + 1, referencedTargetOid); + + *out = refModel; + return GIT_OK; + } + git_repository *repo = git_reference_owner(ref); + + git_tag *referencedTag; + if (git_tag_lookup(&referencedTag, repo, referencedTargetOid) == GIT_OK) { + const char *tagMessage = git_tag_message(referencedTag); + refModel->message = tagMessage ? strdup(tagMessage) : NULL; + + git_odb_object *tagOdbObject; + if (git_odb_read(&tagOdbObject, odb, git_tag_id(referencedTag)) == GIT_OK) { + refModel->tagOdbBufferLength = git_odb_object_size(tagOdbObject); + refModel->tagOdbBuffer = new char[refModel->tagOdbBufferLength]; + std::memcpy(refModel->tagOdbBuffer, git_odb_object_data(tagOdbObject), refModel->tagOdbBufferLength); + git_odb_object_free(tagOdbObject); + } + + git_tag_free(referencedTag); + } + + git_oid peeledReferencedTargetOid; + int error = getOidOfReferenceCommit(&peeledReferencedTargetOid, ref); + if (error != GIT_OK) { + delete refModel; + return error; + } + + git_oid_tostr(refModel->sha, GIT_OID_HEXSZ + 1, &peeledReferencedTargetOid); + + *out = refModel; + return GIT_OK; + } + + static void ensureSignatureRegexes() { + if (!signatureRegexesBySignatureType.IsEmpty()) { + return; + } + + v8::Local gpgsigArray = Nan::New(2), + x509Array = Nan::New(1); + + Nan::Set( + gpgsigArray, + Nan::New(0), + Nan::New( + Nan::New("-----BEGIN PGP SIGNATURE-----[\\s\\S]+?-----END PGP SIGNATURE-----").ToLocalChecked(), + static_cast(v8::RegExp::Flags::kGlobal | v8::RegExp::Flags::kMultiline) + ).ToLocalChecked() + ); + + Nan::Set( + gpgsigArray, + Nan::New(1), + Nan::New( + Nan::New("-----BEGIN PGP MESSAGE-----[\\s\\S]+?-----END PGP MESSAGE-----").ToLocalChecked(), + static_cast(v8::RegExp::Flags::kGlobal | v8::RegExp::Flags::kMultiline) + ).ToLocalChecked() + ); + + Nan::Set( + 
x509Array, + Nan::New(0), + Nan::New( + Nan::New("-----BEGIN SIGNED MESSAGE-----[\\s\\S]+?-----END SIGNED MESSAGE-----").ToLocalChecked(), + static_cast(v8::RegExp::Flags::kGlobal | v8::RegExp::Flags::kMultiline) + ).ToLocalChecked() + ); + + v8::Local result = Nan::New(); + Nan::Set(result, Nan::New("gpgsig").ToLocalChecked(), gpgsigArray); + Nan::Set(result, Nan::New("x509").ToLocalChecked(), x509Array); + + signatureRegexesBySignatureType.Reset(result); + } + + v8::Local toJavascript(v8::Local signatureType) { + v8::Local result = Nan::New(); + + v8::Local jsFullName; + if (fullName == NULL) { + jsFullName = Nan::Null(); + } else { + jsFullName = Nan::New(fullName).ToLocalChecked(); + } + Nan::Set(result, Nan::New("fullName").ToLocalChecked(), jsFullName); + + v8::Local jsMessage; + if (message == NULL) { + jsMessage = Nan::Null(); + } else { + jsMessage = Nan::New(message).ToLocalChecked(); + } + Nan::Set(result, Nan::New("message").ToLocalChecked(), jsMessage); + + Nan::Set( + result, + Nan::New("sha").ToLocalChecked(), + Nan::New(sha).ToLocalChecked() + ); + + v8::Local jsShorthand; + if (shorthand == NULL) { + jsShorthand = Nan::Null(); + } else { + jsShorthand = Nan::New(shorthand).ToLocalChecked(); + } + Nan::Set(result, Nan::New("shorthand").ToLocalChecked(), jsShorthand); + + v8::Local jsTagSignature = Nan::Null(); + if (tagOdbBuffer != NULL && tagOdbBufferLength != 0) { + // tagOdbBuffer is already a copy, so we'd like to use NewBuffer instead, + // but we were getting segfaults and couldn't easily figure out why. :( + // We tried passing the tagOdbBuffer directly to NewBuffer and then nullifying tagOdbBuffer so that + // the destructor didn't double free, but that still segfaulted internally in Node. 
+ v8::Local buffer = Nan::CopyBuffer(tagOdbBuffer, tagOdbBufferLength).ToLocalChecked(); + v8::Local toStringProp = Nan::Get(buffer, Nan::New("toString").ToLocalChecked()).ToLocalChecked(); + v8::Local jsTagOdbObjectString = Nan::To(Nan::CallAsFunction(Nan::To(toStringProp).ToLocalChecked(), buffer, 0, NULL).ToLocalChecked()).ToLocalChecked(); + + v8::Local _signatureRegexesBySignatureType = Nan::New(signatureRegexesBySignatureType); + v8::Local signatureRegexes = v8::Local::Cast(Nan::Get(_signatureRegexesBySignatureType, signatureType).ToLocalChecked()); + + for (uint32_t i = 0; i < signatureRegexes->Length(); ++i) { + v8::Local argv[] = { + Nan::Get(signatureRegexes, Nan::New(i)).ToLocalChecked() + }; + + v8::Local matchProp = Nan::Get(jsTagOdbObjectString, Nan::New("match").ToLocalChecked()).ToLocalChecked(); + v8::Local match = Nan::CallAsFunction(Nan::To(matchProp).ToLocalChecked(), jsTagOdbObjectString, 1, argv).ToLocalChecked(); + if (match->IsArray()) { + jsTagSignature = Nan::Get(Nan::To(match).ToLocalChecked(), 0).ToLocalChecked(); + break; + } + } + } + Nan::Set(result, Nan::New("tagSignature").ToLocalChecked(), jsTagSignature); + + v8::Local jsType; + if (type == NULL) { + jsType = Nan::Null(); + } else { + jsType = Nan::New(type).ToLocalChecked(); + } + Nan::Set(result, Nan::New("type").ToLocalChecked(), jsType); + + return result; + } + + ~RefreshedRefModel() { + if (fullName != NULL) { free(fullName); } + if (message != NULL) { free(message); } + delete[] sha; + if (shorthand != NULL) { free(shorthand); } + if (tagOdbBuffer != NULL) { delete[] tagOdbBuffer; } + } + + char *fullName, *message, *sha, *shorthand, *tagOdbBuffer; + size_t tagOdbBufferLength; + const char *type; + static Nan::Persistent signatureRegexesBySignatureType; +}; + +Nan::Persistent RefreshedRefModel::signatureRegexesBySignatureType; + +class UpstreamModel { +public: + UpstreamModel(const char *inputDownstreamFullName, const char *inputUpstreamFullName): + downstreamFullName((char 
*)strdup(inputDownstreamFullName)), + upstreamFullName((char *)strdup(inputUpstreamFullName)), + ahead(0), + behind(0) {} + + UpstreamModel(const UpstreamModel &) = delete; + UpstreamModel(UpstreamModel &&) = delete; + UpstreamModel &operator=(const UpstreamModel &) = delete; + UpstreamModel &operator=(UpstreamModel &&) = delete; + + static bool fromReference(UpstreamModel **out, git_reference *ref) { + if (!git_reference_is_branch(ref)) { + return false; + } + + git_reference *upstream; + int result = git_branch_upstream(&upstream, ref); + if (result != GIT_OK) { + return false; + } + + UpstreamModel *upstreamModel = new UpstreamModel( + git_reference_name(ref), + git_reference_name(upstream) + ); + + git_oid localCommitOid; + result = getOidOfReferenceCommit(&localCommitOid, ref); + if (result != GIT_OK) { + delete upstreamModel; + return false; + } + + git_oid upstreamCommitOid; + result = getOidOfReferenceCommit(&upstreamCommitOid, upstream); + if (result != GIT_OK) { + delete upstreamModel; + return false; + } + + result = git_graph_ahead_behind( + &upstreamModel->ahead, + &upstreamModel->behind, + git_reference_owner(ref), + &localCommitOid, + &upstreamCommitOid + ); + + if (result != GIT_OK) { + delete upstreamModel; + return false; + } + + *out = upstreamModel; + return true; + } + + v8::Local toJavascript() { + v8::Local result = Nan::New(); + + v8::Local jsDownstreamFullName; + if (downstreamFullName == NULL) { + jsDownstreamFullName = Nan::Null(); + } else { + jsDownstreamFullName = Nan::New(downstreamFullName).ToLocalChecked(); + } + Nan::Set(result, Nan::New("downstreamFullName").ToLocalChecked(), jsDownstreamFullName); + + v8::Local jsUpstreamFullName; + if (upstreamFullName == NULL) { + jsUpstreamFullName = Nan::Null(); + } else { + jsUpstreamFullName = Nan::New(upstreamFullName).ToLocalChecked(); + } + Nan::Set(result, Nan::New("upstreamFullName").ToLocalChecked(), jsUpstreamFullName); + + Nan::Set(result, Nan::New("ahead").ToLocalChecked(), 
Nan::New(ahead)); + Nan::Set(result, Nan::New("behind").ToLocalChecked(), Nan::New(behind)); + return result; + } + + ~UpstreamModel() { + if (downstreamFullName != NULL) { free(downstreamFullName); } + if (upstreamFullName != NULL) { free(upstreamFullName); } + } + + char *downstreamFullName; + char *upstreamFullName; + size_t ahead; + size_t behind; +}; + +class RefreshReferencesData { +public: + RefreshReferencesData(): + headRefFullName(NULL), + cherrypick(NULL), + merge(NULL) {} + + RefreshReferencesData(const RefreshReferencesData &) = delete; + RefreshReferencesData(RefreshReferencesData &&) = delete; + RefreshReferencesData &operator=(const RefreshReferencesData &) = delete; + RefreshReferencesData &operator=(RefreshReferencesData &&) = delete; + + ~RefreshReferencesData() { + while(refs.size()) { + delete refs.back(); + refs.pop_back(); + } + while(upstreamInfo.size()) { + delete upstreamInfo.back(); + upstreamInfo.pop_back(); + } + if (headRefFullName != NULL) { free(headRefFullName); } + if (cherrypick != NULL) { delete cherrypick; } + if (merge != NULL) { delete merge; } + } + + std::vector refs; + std::vector upstreamInfo; + char *headRefFullName; + RefreshedRefModel *cherrypick; + RefreshedRefModel *merge; +}; + +NAN_METHOD(GitRepository::RefreshReferences) +{ + v8::Local signatureType; + if (info.Length() == 2) { + if (!info[0]->IsString()) { + return Nan::ThrowError("Signature type must be \"gpgsig\" or \"x509\"."); + } + + v8::Local signatureTypeParam = Nan::To(info[0]).ToLocalChecked(); + if ( + Nan::Equals(signatureTypeParam, Nan::New("gpgsig").ToLocalChecked()) != Nan::Just(true) + && Nan::Equals(signatureTypeParam, Nan::New("x509").ToLocalChecked()) != Nan::Just(true) + ) { + return Nan::ThrowError("Signature type must be \"gpgsig\" or \"x509\"."); + } + signatureType = signatureTypeParam; + } else { + signatureType = Nan::New("gpgsig").ToLocalChecked(); + } + + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is 
required and must be a Function."); + } + + RefreshReferencesBaton* baton = new RefreshReferencesBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->out = (void *)new RefreshReferencesData(); + baton->repo = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + RefreshReferencesWorker *worker = new RefreshReferencesWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info.This()); + worker->Reference("signatureType", signatureType); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster GitRepository::RefreshReferencesWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->repo); + return lockMaster; +} + +void GitRepository::RefreshReferencesWorker::Execute() +{ + giterr_clear(); + + git_repository *repo = baton->repo; + RefreshReferencesData *refreshData = (RefreshReferencesData *)baton->out; + git_odb *odb; + + baton->error_code = git_repository_odb(&odb, repo); + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + delete refreshData; + baton->out = NULL; + return; + } + + // START Refresh HEAD + git_reference *headRef = NULL; + baton->error_code = lookupDirectReferenceByShorthand(&headRef, repo, "HEAD"); + + if (baton->error_code != GIT_OK || headRef == NULL) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + git_odb_free(odb); + delete refreshData; + baton->out = NULL; + return; + } + + RefreshedRefModel *headModel; + baton->error_code = RefreshedRefModel::fromReference(&headModel, headRef, odb); + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + git_odb_free(odb); + git_reference_free(headRef); + delete refreshData; + baton->out 
= NULL; + return; + } + refreshData->refs.push_back(headModel); + + refreshData->headRefFullName = strdup(git_reference_name(headRef)); + git_reference_free(headRef); + // END Refresh HEAD + + // START Refresh CHERRY_PICK_HEAD + git_reference *cherrypickRef = NULL; + if (lookupDirectReferenceByShorthand(&cherrypickRef, repo, "CHERRY_PICK_HEAD") == GIT_OK && cherrypickRef != NULL) { + baton->error_code = RefreshedRefModel::fromReference(&refreshData->cherrypick, cherrypickRef, odb); + git_reference_free(cherrypickRef); + } else { + cherrypickRef = NULL; + } + // END Refresh CHERRY_PICK_HEAD + + // START Refresh MERGE_HEAD + git_reference *mergeRef = NULL; + // fall through if cherry pick failed + if (baton->error_code == GIT_OK && lookupDirectReferenceByShorthand(&mergeRef, repo, "MERGE_HEAD") == GIT_OK && mergeRef != NULL) { + baton->error_code = RefreshedRefModel::fromReference(&refreshData->merge, mergeRef, odb); + git_reference_free(mergeRef); + } else { + mergeRef = NULL; + } + // END Refresh MERGE_HEAD + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + git_odb_free(odb); + delete refreshData; + baton->out = NULL; + return; + } + + // Retrieve reference models and upstream info for each reference + git_strarray referenceNames; + baton->error_code = git_reference_list(&referenceNames, repo); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + git_odb_free(odb); + delete refreshData; + baton->out = NULL; + return; + } + + git_strarray remoteNames; + baton->error_code = git_remote_list(&remoteNames, repo); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + git_odb_free(odb); + git_strarray_free(&referenceNames); + delete refreshData; + baton->out = NULL; + return; + } + + for (size_t referenceIndex = 0; referenceIndex < referenceNames.count; 
++referenceIndex) { + git_reference *reference; + baton->error_code = lookupDirectReferenceByFullName(&reference, repo, referenceNames.strings[referenceIndex]); + + if (baton->error_code != GIT_OK) { + break; + } + if (reference == NULL) { + // lookup found the reference but failed to resolve it directly + continue; + } + + UpstreamModel *upstreamModel; + if (UpstreamModel::fromReference(&upstreamModel, reference)) { + refreshData->upstreamInfo.push_back(upstreamModel); + } + + bool isBranch = git_reference_is_branch(reference); + bool isRemote = git_reference_is_remote(reference); + bool isTag = git_reference_is_tag(reference); + if ( + strcmp(referenceNames.strings[referenceIndex], headModel->fullName) == 0 + || (!isBranch && !isRemote && !isTag) + ) { + git_reference_free(reference); + continue; + } + + if (isRemote) { + char *remoteNameOfRef = getRemoteNameOfReference(reference); + bool isFromExistingRemote = gitStrArrayContains(&remoteNames, remoteNameOfRef); + free(remoteNameOfRef); + if (!isFromExistingRemote) { + git_reference_free(reference); + continue; + } + } + + RefreshedRefModel *refreshedRefModel; + baton->error_code = RefreshedRefModel::fromReference(&refreshedRefModel, reference, odb); + git_reference_free(reference); + + if (baton->error_code == GIT_OK) { + refreshData->refs.push_back(refreshedRefModel); + } else { + baton->error_code = GIT_OK; + } + } + + git_odb_free(odb); + git_strarray_free(&remoteNames); + git_strarray_free(&referenceNames); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + delete refreshData; + baton->out = NULL; + return; + } +} + +void GitRepository::RefreshReferencesWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + RefreshReferencesData *refreshData = (RefreshReferencesData *)baton->out; + delete refreshData; + + delete baton; +} + 
+void GitRepository::RefreshReferencesWorker::HandleOKCallback() +{ + if (baton->out != NULL) + { + RefreshedRefModel::ensureSignatureRegexes(); + auto refreshData = (RefreshReferencesData *)baton->out; + v8::Local result = Nan::New(); + + Nan::Set( + result, + Nan::New("headRefFullName").ToLocalChecked(), + Nan::New(refreshData->headRefFullName).ToLocalChecked() + ); + + v8::Local signatureType = Nan::To(GetFromPersistent("signatureType")).ToLocalChecked(); + + unsigned int numRefs = refreshData->refs.size(); + v8::Local refs = Nan::New(numRefs); + for (unsigned int i = 0; i < numRefs; ++i) { + RefreshedRefModel *refreshedRefModel = refreshData->refs[i]; + Nan::Set(refs, Nan::New(i), refreshedRefModel->toJavascript(signatureType)); + } + Nan::Set(result, Nan::New("refs").ToLocalChecked(), refs); + + unsigned int numUpstreamInfo = refreshData->upstreamInfo.size(); + v8::Local upstreamInfo = Nan::New(numUpstreamInfo); + for (unsigned int i = 0; i < numUpstreamInfo; ++i) { + UpstreamModel *upstreamModel = refreshData->upstreamInfo[i]; + Nan::Set(upstreamInfo, Nan::New(i), upstreamModel->toJavascript()); + } + Nan::Set(result, Nan::New("upstreamInfo").ToLocalChecked(), upstreamInfo); + + if (refreshData->cherrypick != NULL) { + Nan::Set( + result, + Nan::New("cherrypick").ToLocalChecked(), + refreshData->cherrypick->toJavascript(signatureType) + ); + } else { + Nan::Set(result, Nan::New("cherrypick").ToLocalChecked(), Nan::Null()); + } + + if (refreshData->merge != NULL) { + Nan::Set( + result, + Nan::New("merge").ToLocalChecked(), + refreshData->merge->toJavascript(signatureType) + ); + } else { + Nan::Set(result, Nan::New("merge").ToLocalChecked(), Nan::Null()); + } + + delete refreshData; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } + else if (baton->error) + { + Local argv[1] = { + Nan::Error(baton->error->message) + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) + { + free((void 
*)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + Local err = Nan::To(Nan::Error("Repository refreshReferences has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Repository.refreshReferences").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + else + { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} diff --git a/generate/templates/manual/repository/statistics.cc b/generate/templates/manual/repository/statistics.cc new file mode 100644 index 0000000000..f438bb5f8d --- /dev/null +++ b/generate/templates/manual/repository/statistics.cc @@ -0,0 +1,1892 @@ +/** + * \struct CommitsGraphNode + */ +struct CommitsGraphNode +{ + CommitsGraphNode(uint32_t aParentsLeft) : parentsLeft(aParentsLeft) {} + CommitsGraphNode() = default; + ~CommitsGraphNode() = default; + CommitsGraphNode(const CommitsGraphNode &other) = delete; + CommitsGraphNode(CommitsGraphNode &&other) = delete; + CommitsGraphNode& operator=(const CommitsGraphNode &other) = delete; + CommitsGraphNode& operator=(CommitsGraphNode &&other) = delete; + + std::vector children {}; + uint32_t parentsLeft {0}; // used when calculating the maximum history depth +}; + +/** + * \class CommitsGraph + */ +class CommitsGraph +{ +public: + CommitsGraph() = default; + ~CommitsGraph() = default; + CommitsGraph(const CommitsGraph &other) = delete; + CommitsGraph(CommitsGraph &&other) = delete; + CommitsGraph& operator=(const CommitsGraph &other) = delete; + CommitsGraph& operator=(CommitsGraph &&other) = delete; + + using CommitsGraphMap = std::unordered_map>; + + void AddNode(const std::string &oidStr, const std::vector &parents); + uint32_t CalculateMaxDepth(); + +private: + void addParentNode(const std::string &oidParentStr, CommitsGraphNode *child); + + 
CommitsGraphMap m_mapOidNode {}; + std::vector m_roots {}; +}; + +/** + * CommitsGraph::AddNode + * + * \param oidStr oid of the commit object to add. + * \param parents oids of the commit's parents. + */ +void CommitsGraph::AddNode(const std::string &oidStr, const std::vector &parents) +{ + const uint32_t numParents = static_cast(parents.size()); + + auto emplacePair = m_mapOidNode.emplace(std::make_pair( + oidStr, std::make_unique(numParents))); + + CommitsGraphMap::iterator itNode = emplacePair.first; + + // if this node already added by a child, update its parentsLeft + if (emplacePair.second == false) { + itNode->second.get()->parentsLeft = numParents; + } + + // set roots + if (numParents == 0) { + m_roots.emplace_back(itNode->second.get()); + } + + // add parents + for (unsigned int i = 0; i < numParents; ++i) { + addParentNode(parents.at(i), itNode->second.get()); + } +} + +/** + * CommitsGraph::CalculateMaxDepth + * \return Calculated maximum depth of the tree. + * + * Uses iterative algorithm to count levels. + * Considers multiple initial commits. + * Considers that children of one level can have multiple parents, hence we insert unique children + * at each level. + * Considers that same child can be in different levels. Here to prevent counting the same child + * multiple times, we only add a child when the last parent (parentsLeft) inserts it. This is + * actually what makes the algorithm fast. + * Recursive algorithm avoided to prevent stack overflow in case of excessive levels in the tree. + * + * Explanation of the algorithm: + * once the graph is built with the commit history, `CalculateMaxDepth()` counts the maximum number + * of levels from any of the roots to any of the leaves, which gives us the maximum depth + * (`historyStructure.maxDepth` in the final result). 
+ * Inside `CalculateMaxDepth()`, to count levels, we add in an iterative way for each level and + * starting at the roots level, all the children from that level, but only if each child is the last + * time we'll consider it in the algorithm (for example if a child node 'C' has 2 parents 'P1' and + * 'P2', and 'P1' has already been considered before in the algorithm as parent of 'C', and now we are + * processing 'C' as a child from 'P2', which will be the last time, as 'C' has no more parents left). + * This way we prevent counting 'C' multiple times. + */ +uint32_t CommitsGraph::CalculateMaxDepth() +{ + uint32_t maxDepth {0}; + std::unordered_set parents {}; + std::unordered_set children {}; + + // start from the root commmits + for (CommitsGraphNode *root : m_roots) { + children.insert(root); + } + + while (!children.empty()) { + ++maxDepth; + parents = std::move(children); + + // add unique children of next level, and only if from the last parent + for (CommitsGraphNode *parent : parents) { + for (CommitsGraphNode *child : parent->children) { + if (--child->parentsLeft == 0) { + children.insert(child); + } + } + } + } + + return maxDepth; +} + +/** + * CommitsGraph::addParentNode + * + * \param oidParentStr oid of the parent commit to add. + * \param child Child of the parent commit being added. + */ +void CommitsGraph::addParentNode(const std::string &oidParentStr, CommitsGraphNode *child) +{ + CommitsGraphMap::iterator itParentNode = m_mapOidNode.emplace(std::make_pair( + oidParentStr, std::make_unique())).first; + + // add child to parent + itParentNode->second->children.emplace_back(child); +} + +/** + * \struct TreeStatistics + * Structure to store statistics for a git tree object. 
+ */ +struct TreeStatistics +{ + TreeStatistics() = default; + ~TreeStatistics() = default; + TreeStatistics(const TreeStatistics &other) = delete; + TreeStatistics(TreeStatistics &&other) = default; + TreeStatistics& operator=(const TreeStatistics &other) = delete; + TreeStatistics& operator=(TreeStatistics &&other) = default; + + size_t numDirectories{0}; + size_t maxPathDepth {0}; + size_t maxPathLength {0}; + size_t numFiles {0}; + size_t totalFileSize {0}; + size_t numSymlinks {0}; + size_t numSubmodules {0}; +}; + +/** + * \struct Statistics + * Stores statistics of the analyzed repository. + */ +struct Statistics +{ + Statistics() = default; + ~Statistics() = default; + Statistics(const Statistics &other) = delete; + Statistics(Statistics &&other) = delete; + Statistics& operator=(const Statistics &other) = delete; + Statistics& operator=(Statistics &&other) = delete; + + struct { + struct { size_t count {0}; size_t size {0}; } commits; + struct { size_t count {0}; size_t size {0}; size_t entries {0}; } trees; + struct { size_t count {0}; size_t size {0}; } blobs; + struct { size_t count {0}; } annotatedTags; + struct { size_t count {0}; } references; + } repositorySize {}; + + struct { + struct { size_t maxSize {0}; size_t maxParents {0}; } commits; + struct { size_t maxEntries {0}; } trees; + struct { size_t maxSize {0}; } blobs; + } biggestObjects {}; + + struct { + uint32_t maxDepth {0}; + uint32_t maxTagDepth {0}; + } historyStructure {}; + + TreeStatistics biggestCheckouts {}; +}; + +/** + * \struct OdbObjectsData + * Structure to store, for each object read from the repository: + * - its information (size, parents for a commit, etc.) 
+ * - different data needed to obtain the resulting statistics + */ +struct OdbObjectsData +{ + static constexpr uint32_t kUnreachable = 0; + + struct CommitInfo { + std::string oidTree {}; + size_t size {0}; + std::vector parents {}; + // number of sources from which a commit can be reached: + // a child commit, a tag, or a direct git reference + uint32_t reachability {kUnreachable}; + }; + + struct TreeInfoAndStats { + size_t size {0}; + size_t numEntries {0}; + std::vector entryBlobs {}; + std::vector< std::pair > entryTreesNameLen {}; + // number of sources from which a tree can be reached: + // a commit, another tree's entry, or a tag + uint32_t reachability {kUnreachable}; + TreeStatistics stats {}; + bool statsDone {false}; + }; + + struct BlobInfo { + size_t size {0}; + // number of sources from which a blob can be reached: + // a tree's entry, or a tag + uint32_t reachability {kUnreachable}; + }; + + struct TagInfo { + static constexpr uint32_t kUnsetDepth = 0; + + std::string oidTarget {}; + git_object_t typeTarget {GIT_OBJECT_INVALID}; + uint32_t depth {kUnsetDepth}; + // number of sources from which a tag can be reached: + // a reference, or another tag + uint32_t reachability {kUnreachable}; + }; + + OdbObjectsData() = default; + ~OdbObjectsData() = default; + OdbObjectsData(const OdbObjectsData &other) = delete; + OdbObjectsData(OdbObjectsData &&other) = delete; + OdbObjectsData& operator=(const OdbObjectsData &other) = delete; + OdbObjectsData& operator=(OdbObjectsData &&other) = delete; + + struct { + std::unordered_map info {}; + std::unordered_set unreachables {}; + // Tree of commits (graph) to be built after having read the object + // database, and pruned unreachable objects. + // Used to calculate the maximum history depth. 
+ CommitsGraph graph {}; + size_t totalSize {0}; + size_t maxSize {0}; + size_t maxParents {0}; + } commits {}; + + struct { + std::unordered_map info; + std::unordered_set unreachables {}; + size_t totalSize {0}; + size_t totalEntries {0}; + size_t maxEntries {0}; + } trees {}; + + struct { + std::unordered_map info {}; + std::unordered_set unreachables {}; + size_t totalSize {0}; + size_t maxSize {0}; + } blobs {}; + + struct { + std::unordered_map info; + std::unordered_set unreachables {}; + } tags {}; + + struct { + std::mutex commits {}; + std::mutex trees {}; + std::mutex blobs {}; + std::mutex tags {}; + } infoMutex; + + using iterCommitInfo = std::unordered_map::iterator; + using iterUnreachable = std::unordered_set::iterator; + using iterTreeInfo = std::unordered_map::iterator; + using iterBlobInfo = std::unordered_map::iterator; + using iterTagInfo = std::unordered_map::iterator; +}; + +/** + * \class WorkItemOid + * WorkItem storing odb oids for the WorkPool. + */ +class WorkItemOid : public WorkItem { +public: + WorkItemOid(const git_oid &oid) + : m_oid(oid) {} + ~WorkItemOid() = default; + WorkItemOid(const WorkItemOid &other) = delete; + WorkItemOid(WorkItemOid &&other) = delete; + WorkItemOid& operator=(const WorkItemOid &other) = delete; + WorkItemOid& operator=(WorkItemOid &&other) = delete; + + const git_oid& GetOid() const { return m_oid; } + +private: + git_oid m_oid {}; +}; + +/** + * \class WorkerStoreOdbData + * Worker for the WorkPool to store odb object data. 
+ */ +class WorkerStoreOdbData : public IWorker +{ +public: + WorkerStoreOdbData(const std::string &repoPath, OdbObjectsData *odbObjectsData) + : m_repoPath(repoPath), m_odbObjectsData(odbObjectsData) {} + ~WorkerStoreOdbData(); + WorkerStoreOdbData(const WorkerStoreOdbData &other) = delete; + WorkerStoreOdbData(WorkerStoreOdbData &&other) = delete; + WorkerStoreOdbData& operator=(const WorkerStoreOdbData &other) = delete; + WorkerStoreOdbData& operator=(WorkerStoreOdbData &&other) = delete; + + bool Initialize(); + bool Execute(std::unique_ptr &&work); + +private: + OdbObjectsData::TreeInfoAndStats thisTreeInfoAndStats(const git_tree *tree, size_t size, size_t numEntries); + + std::string m_repoPath {}; + git_repository *m_repo {nullptr}; + git_odb *m_odb {nullptr}; + OdbObjectsData *m_odbObjectsData {nullptr}; +}; + +/** + * WorkerStoreOdbData::~WorkerStoreOdbData + */ +WorkerStoreOdbData::~WorkerStoreOdbData() { + if (m_odb) { + git_odb_free(m_odb); + } + if (m_repo) { + git_repository_free(m_repo); + } +} + +/** + * WorkerStoreOdbData::Initialize + */ +bool WorkerStoreOdbData::Initialize() { + if (m_repo != nullptr) { // if already initialized + return true; + } + + return !m_repoPath.empty() && + git_repository_open(&m_repo, m_repoPath.c_str()) == GIT_OK && + git_repository_odb(&m_odb, m_repo) == GIT_OK; +} + +/** + * WorkerStoreOdbData::Execute + */ +bool WorkerStoreOdbData::Execute(std::unique_ptr &&work) +{ + std::unique_ptr wi {static_cast(work.release())}; + const git_oid &oid = wi->GetOid(); + + // NOTE about PERFORMANCE (May 2021): + // git_object_lookup() is as expensive as git_odb_read(). + // They give access to different information from the libgit2 API. + // Try to call only one of them if possible. 
+ + git_object *target {nullptr}; + if (git_object_lookup(&target, m_repo, &oid, GIT_OBJECT_ANY) != GIT_OK) { + return false; + } + + switch (git_object_type(target)) + { + case GIT_OBJECT_COMMIT: + { + git_commit *commit = (git_commit*)target; + // NOTE about PERFORMANCE (May 2021): + // calling git_odb_object_size() was slightly faster than calculating header size + message size + 1 with GK repo + + // obtain size + git_odb_object *obj {nullptr}; + if (git_odb_read(&obj, m_odb, &oid) != GIT_OK) { + git_object_free(target); + return false; + } + const size_t size = git_odb_object_size(obj); + git_odb_object_free(obj); + + // obtain CommitInfo + const unsigned int numParents = git_commit_parentcount(commit); + std::vector parents {}; + for (unsigned int i = 0; i < numParents; ++i) { + parents.emplace_back(reinterpret_cast(git_commit_parent_id(commit, i)->id), + GIT_OID_RAWSZ); + } + + OdbObjectsData::CommitInfo commitInfo { + std::string(reinterpret_cast(git_commit_tree_id(commit)->id), GIT_OID_RAWSZ), + size, + std::move(parents), + OdbObjectsData::kUnreachable}; + + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.commits); + + m_odbObjectsData->commits.info.emplace(std::make_pair( + std::string(reinterpret_cast(oid.id), GIT_OID_RAWSZ), + std::move(commitInfo))); + } + } + break; + + case GIT_OBJECT_TREE: + { + git_tree *tree = (git_tree*)target; + + // do not count empty trees, like git's empty tree "4b825dc642cb6eb9a060e54bf8d69288fbee4904" + const size_t numEntries = git_tree_entrycount(tree); + if (numEntries == 0) { + git_object_free(target); + return true; + } + + // obtain size + git_odb_object *obj {nullptr}; + if (git_odb_read(&obj, m_odb, &oid) != GIT_OK) { + git_object_free(target); + return false; + } + const size_t size = git_odb_object_size(obj); + git_odb_object_free(obj); + + // obtain tree data and calculate statistics for only this tree (not recursively) + OdbObjectsData::TreeInfoAndStats treeInfoAndStats = thisTreeInfoAndStats(tree, 
size, numEntries); + + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.trees); + + m_odbObjectsData->trees.info.emplace(std::make_pair( + std::string(reinterpret_cast(oid.id), GIT_OID_RAWSZ), + std::move(treeInfoAndStats))); + } + } + break; + + case GIT_OBJECT_BLOB: + { + git_blob *blob = (git_blob*)target; + const size_t size = git_blob_rawsize(blob); + OdbObjectsData::BlobInfo blobInfo {size, OdbObjectsData::kUnreachable}; + + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.blobs); + + m_odbObjectsData->blobs.info.emplace(std::make_pair( + std::string(reinterpret_cast(oid.id), GIT_OID_RAWSZ), + std::move(blobInfo))); + } + } + break; + + case GIT_OBJECT_TAG: + { + // obtain TagInfo + git_tag *tag = (git_tag*)target; + const git_oid *oid_target = git_tag_target_id(tag); + OdbObjectsData::TagInfo tagInfo { + std::string(reinterpret_cast(oid_target->id), GIT_OID_RAWSZ), + git_tag_target_type(tag), + OdbObjectsData::TagInfo::kUnsetDepth, + OdbObjectsData::kUnreachable}; + + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.tags); + m_odbObjectsData->tags.info.emplace(std::make_pair( + std::string(reinterpret_cast(oid.id), GIT_OID_RAWSZ), + std::move(tagInfo))); + } + } + break; + + default: + break; + } + + git_object_free(target); + + return true; +} + +/** + * WorkerStoreOdbData::thisTreeInfoAndStats + * + * Obtain tree data and calculate the part of this tree's statistics that each thread can do. + * + * \param tree tree to get data from and calculate partial statistics of. + * \param size tree size, to be added to the final result. + * \param numEntries number of entries of this tree. + * \return this tree's data and partial statistics. 
+ */ +OdbObjectsData::TreeInfoAndStats WorkerStoreOdbData::thisTreeInfoAndStats(const git_tree *tree, size_t size, + size_t numEntries) +{ + OdbObjectsData::TreeInfoAndStats treeInfoAndStats {}; + treeInfoAndStats.size = size; + treeInfoAndStats.numEntries = numEntries; + + for (size_t i = 0; i < numEntries; ++i) + { + const git_tree_entry *te = git_tree_entry_byindex(tree, i); + if (te == nullptr) { + continue; + } + const git_object_t te_type = git_tree_entry_type(te); + const char *teName {nullptr}; + size_t teNameLen {0}; + const git_oid *te_oid {nullptr}; + + switch (te_type) + { + // count submodules + case GIT_OBJECT_COMMIT: + if (git_tree_entry_filemode(te) == GIT_FILEMODE_COMMIT) { + ++treeInfoAndStats.stats.numSubmodules; + } + break; + + case GIT_OBJECT_BLOB: + { + // count symbolic links, but don't add them as blob entries + if (git_tree_entry_filemode(te) == GIT_FILEMODE_LINK) { + ++treeInfoAndStats.stats.numSymlinks; + } + else { + ++treeInfoAndStats.stats.numFiles; + teName = git_tree_entry_name(te); + teNameLen = std::char_traits::length(teName); + treeInfoAndStats.stats.maxPathLength = + std::max(treeInfoAndStats.stats.maxPathLength, teNameLen); + } + // store both types of files (symbolic links and non symbolic links) as entryBlob + te_oid = git_tree_entry_id(te); + treeInfoAndStats.entryBlobs.emplace_back( + reinterpret_cast(te_oid->id), GIT_OID_RAWSZ); + } + break; + + case GIT_OBJECT_TREE: + { + // We store tree's name length to compare in posterior stage, after threads work + teName = git_tree_entry_name(te); + teNameLen = std::char_traits::length(teName); + + te_oid = git_tree_entry_id(te); + treeInfoAndStats.entryTreesNameLen.emplace_back(std::make_pair( + std::string(reinterpret_cast(te_oid->id), GIT_OID_RAWSZ), + teNameLen)); + } + break; + + default: + break; + } + } + + return treeInfoAndStats; +} + +/** + * \class WorkItemOidStrType + * WorkItem storing pointers to object info structs for the WorkPool. 
+ */ +class WorkItemOidStrType : public WorkItem { +public: + WorkItemOidStrType(void *objectInfo, git_object_t type) + : m_objectInfo(objectInfo), m_oid_type(type) {} + ~WorkItemOidStrType() = default; + WorkItemOidStrType(const WorkItemOidStrType &other) = delete; + WorkItemOidStrType(WorkItemOidStrType &&other) = delete; + WorkItemOidStrType& operator=(const WorkItemOidStrType &other) = delete; + WorkItemOidStrType& operator=(WorkItemOidStrType &&other) = delete; + + void* GetObjectInfo() const { return m_objectInfo; } + const git_object_t& GetOidType() const { return m_oid_type; } + +private: + void *m_objectInfo {nullptr}; + git_object_t m_oid_type {}; +}; + +/** + * \class WorkerReachCounter + * Worker for the WorkPool to count reachability of each object. + */ +class WorkerReachCounter : public IWorker +{ +public: + WorkerReachCounter(OdbObjectsData *odbObjectsData) + : m_odbObjectsData(odbObjectsData) {} + ~WorkerReachCounter() = default; + WorkerReachCounter(const WorkerReachCounter &other) = delete; + WorkerReachCounter(WorkerReachCounter &&other) = delete; + WorkerReachCounter& operator=(const WorkerReachCounter &other) = delete; + WorkerReachCounter& operator=(WorkerReachCounter &&other) = delete; + + bool Initialize() { return true; } + bool Execute(std::unique_ptr &&work); + +private: + void setReachabilityFromTags(void *objectInfo); + void setReachabilityFromCommits(void *objectInfo); + void setReachabilityFromTrees(void *objectInfo); + + OdbObjectsData *m_odbObjectsData {nullptr}; +}; + +/** + * WorkerReachCounter::Execute + */ +bool WorkerReachCounter::Execute(std::unique_ptr &&work) +{ + std::unique_ptr wi {static_cast(work.release())}; + void *objectInfo = wi->GetObjectInfo(); + const git_object_t &oid_type = wi->GetOidType(); + + switch (oid_type) { + case GIT_OBJECT_TAG: + setReachabilityFromTags(objectInfo); + break; + case GIT_OBJECT_COMMIT: + setReachabilityFromCommits(objectInfo); + break; + case GIT_OBJECT_TREE: + 
setReachabilityFromTrees(objectInfo); + break; + case GIT_OBJECT_BLOB: + // do not process blobs in this stage + break; + default: + break; + } + + return true; +} + +/** + * WorkerReachCounter::setReachabilityFromTags + * Adds reachability counter where tags point (any type of object). + */ +void WorkerReachCounter::setReachabilityFromTags(void *objectInfo) +{ + const OdbObjectsData::TagInfo *tagInfo = static_cast(objectInfo); + + switch (tagInfo->typeTarget) { + case GIT_OBJECT_COMMIT: + { + OdbObjectsData::iterCommitInfo itCommitInfo = + m_odbObjectsData->commits.info.find(tagInfo->oidTarget); + + if (itCommitInfo != m_odbObjectsData->commits.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.commits); + ++itCommitInfo->second.reachability; + } + } + } + break; + + case GIT_OBJECT_TREE: + { + OdbObjectsData::iterTreeInfo itTreeInfo = + m_odbObjectsData->trees.info.find(tagInfo->oidTarget); + + if (itTreeInfo != m_odbObjectsData->trees.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.trees); + ++itTreeInfo->second.reachability; + } + } + } + + case GIT_OBJECT_BLOB: + { + OdbObjectsData::iterBlobInfo itBlobInfo = + m_odbObjectsData->blobs.info.find(tagInfo->oidTarget); + + if (itBlobInfo != m_odbObjectsData->blobs.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.blobs); + ++itBlobInfo->second.reachability; + } + } + } + + case GIT_OBJECT_TAG: + { + OdbObjectsData::iterTagInfo itTargetTagInfo = + m_odbObjectsData->tags.info.find(tagInfo->oidTarget); + + if (itTargetTagInfo != m_odbObjectsData->tags.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.tags); + ++itTargetTagInfo->second.reachability; + } + } + } + default: + break; + } +} + +/** + * WorkerReachCounter::setReachabilityFromCommits + * Adds reachability counter where commits point (parents and tree). 
+ */ +void WorkerReachCounter::setReachabilityFromCommits(void *objectInfo) +{ + const OdbObjectsData::CommitInfo *commitInfo = + static_cast(objectInfo); + const size_t numParents = commitInfo->parents.size(); + + // set parents' reachability + for (size_t i = 0; i < numParents; ++i) { + OdbObjectsData::iterCommitInfo itParentCommitInfo = + m_odbObjectsData->commits.info.find(commitInfo->parents.at(i)); + + if (itParentCommitInfo != m_odbObjectsData->commits.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.commits); + ++itParentCommitInfo->second.reachability; + } + } + } + + // add 1 to its tree's reachability + OdbObjectsData::iterTreeInfo itCommitTreeInfo = + m_odbObjectsData->trees.info.find(commitInfo->oidTree); + + if (itCommitTreeInfo != m_odbObjectsData->trees.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.trees); + ++itCommitTreeInfo->second.reachability; + } + } +} + +/** + * WorkerReachCounter::setReachabilityFromTrees + * Adds reachability counter where tree entries point (blobs and other trees). + */ +void WorkerReachCounter::setReachabilityFromTrees(void *objectInfo) +{ + const OdbObjectsData::TreeInfoAndStats *treeInfo = + static_cast(objectInfo); + + // set entry blobs' reachability + for (auto &blob : treeInfo->entryBlobs) { + OdbObjectsData::iterBlobInfo itBlobInfo = m_odbObjectsData->blobs.info.find(blob); + + if (itBlobInfo != m_odbObjectsData->blobs.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.blobs); + ++itBlobInfo->second.reachability; + } + } + } + + // set entry trees' reachability + for (auto &treeNameLen : treeInfo->entryTreesNameLen) { + OdbObjectsData::iterTreeInfo itTreeInfo = m_odbObjectsData->trees.info.find(treeNameLen.first); + + if (itTreeInfo != m_odbObjectsData->trees.info.end()) { + { // lock + std::lock_guard lock(m_odbObjectsData->infoMutex.trees); + ++itTreeInfo->second.reachability; + } + } + } +} + +/** + * forEachOdbCb. 
Callback for git_odb_foreach. + * Returns GIT_OK on success; GIT_EUSER otherwise + */ +static int forEachOdbCb(const git_oid *oid, void *payloadToCast) +{ + WorkerPool *workerPool = + static_cast*>(payloadToCast); + + // Must insert copies of oid, since the pointers might not survive until worker thread picks it up + workerPool->InsertWork(std::make_unique(*oid)); + + // check there were no problems inserting work + if (workerPool->Status() != WPStatus::kOk) { + return GIT_EUSER; + } + + return GIT_OK; +} + +/** + * \class RepoAnalysis + * Class to analyse and hold repository statistics + */ +class RepoAnalysis +{ +public: + static constexpr unsigned int kMinThreads = 4; + + explicit RepoAnalysis(git_repository *repo) + : m_repo(repo) {} + ~RepoAnalysis() = default; + RepoAnalysis(const RepoAnalysis &other) = delete; + RepoAnalysis(RepoAnalysis &&other) = delete; + RepoAnalysis& operator=(const RepoAnalysis &other) = delete; + RepoAnalysis& operator=(RepoAnalysis &&other) = delete; + + int Analyze(); + v8::Local StatisticsToJS() const; + +private: + // stage 1 methods: store data from repository (with threads) + int storeObjectsInfo(); + int storeAndCountRefs(); + // stage 2 methods: count reachability of each object (with threads) + // NOTE: we need this stage, since so far libgit2 doesn't provide unreachable objects + bool setObjectsReachability(); + void setReachabilityFromRefs(); + void setUnreachables(); + // stage 3 methods: prune unreachable oids + void pruneUnreachables(); + void pruneUnreachableTags(); + void pruneUnreachableCommits(); + void pruneUnreachableTrees(); + void pruneUnreachableBlobs(); + // stage 4 methods: repositorySize and biggestObjects + void statsCountAndMax(); + // stage 5 methods: historyStructure and biggestCheckouts + bool statsHistoryAndBiggestCheckouts(); + bool calculateBiggestCheckouts(); + OdbObjectsData::iterTreeInfo calculateTreeStatistics(const std::string &oidTree); + bool calculateMaxTagDepth(); + 
OdbObjectsData::iterTagInfo calculateTagDepth(const std::string &oidTag); + // methods to return the statistics calculated + void fillOutStatistics(); + v8::Local repositorySizeToJS() const; + v8::Local biggestObjectsToJS() const; + v8::Local historyStructureToJS() const; + v8::Local biggestCheckoutsToJS() const; + + git_repository *m_repo {nullptr}; + Statistics m_statistics {}; + // odb objects info to build while reading the object database by each thread + OdbObjectsData m_odbObjectsData {}; + // oid and type of peeled references + std::unordered_map m_peeledRefs {}; +}; + +/** + * RepoAnalysis::Analyze + * To obtain the final result, the whole process is run in different stages. + * If a stage leverages threads via a worker pool, the worker pool is created + * and we wait until all the threads are done to continue with the next stage. + */ +int RepoAnalysis::Analyze() +{ + int errorCode {GIT_OK}; + + // stage 1 + if ((errorCode = storeObjectsInfo() != GIT_OK)) { + return errorCode; + } + + // stage 2 + if (!setObjectsReachability()) { + return GIT_EUSER; + } + + // stage 3 + pruneUnreachables(); + + // stage 4 + statsCountAndMax(); + + // stage 5 + if (!statsHistoryAndBiggestCheckouts()) { + return GIT_EUSER; + } + + fillOutStatistics(); + + return errorCode; +} + +/** + * RepoAnalysis::StatisticsToJS + */ +v8::Local RepoAnalysis::StatisticsToJS() const +{ + v8::Local result = Nan::New(); + + v8::Local repositorySize = repositorySizeToJS(); + Nan::Set(result, Nan::New("repositorySize").ToLocalChecked(), repositorySize); + + v8::Local biggestObjects = biggestObjectsToJS(); + Nan::Set(result, Nan::New("biggestObjects").ToLocalChecked(), biggestObjects); + + v8::Local historyStructure = historyStructureToJS(); + Nan::Set(result, Nan::New("historyStructure").ToLocalChecked(), historyStructure); + + v8::Local biggestCheckouts = biggestCheckoutsToJS(); + Nan::Set(result, Nan::New("biggestCheckouts").ToLocalChecked(), biggestCheckouts); + + return result; +} + +/** + 
* RepoAnalysis::storeObjectsInfo + * Store information from read odb objects. + * Starts building a container which eventually will hold only reachable objects. + * Leverages threads via a worker pool . + */ +int RepoAnalysis::storeObjectsInfo() +{ + int errorCode {GIT_OK}; + + // get the objects database + git_odb *odb {nullptr}; + if ((errorCode = git_repository_odb(&odb, m_repo)) != GIT_OK) { + return errorCode; + } + + // initialize workers for the worker pool + const std::string repoPath = git_repository_path(m_repo); + const unsigned int numThreads = + std::max(std::thread::hardware_concurrency(), static_cast(kMinThreads)); + + std::vector< std::shared_ptr > workers {}; + for (unsigned int i = 0; i < numThreads; ++i) { + workers.emplace_back(std::make_shared(repoPath, &m_odbObjectsData)); + } + + // initialize worker pool + WorkerPool workerPool {}; + workerPool.Init(workers); + + if ((errorCode = git_odb_foreach(odb, forEachOdbCb, &workerPool)) != GIT_OK) { + workerPool.Shutdown(); + git_odb_free(odb); + return errorCode; + } + + // main thread will work on the refs while waiting for the threads to finish + if ((errorCode = storeAndCountRefs() != GIT_OK)) { + workerPool.Shutdown(); + git_odb_free(odb); + return errorCode; + } + + // wait for the threads to finish and shutdown the work pool + workerPool.Shutdown(); + + // check there were no problems during execution + if (workerPool.Status() != WPStatus::kOk) { + git_odb_free(odb); + return GIT_EUSER; + } + + git_odb_free(odb); + + return errorCode; +} + +/** + * RepoAnalysis::storeAndCountRefs + * Stores the oid and type of peeled references. + * Also counts total references. 
+ */ +int RepoAnalysis::storeAndCountRefs() +{ + int errorCode {GIT_OK}; + git_strarray ref_list; + + // count refs + if ((errorCode = git_reference_list(&ref_list, m_repo)) != GIT_OK) { + return errorCode; + } + m_statistics.repositorySize.references.count = ref_list.count; + + // store refs info + for (size_t i = 0; i < ref_list.count; ++i) + { + // lookup ref + git_reference *ref {nullptr}; + const int refLookupError = git_reference_lookup(&ref, m_repo, ref_list.strings[i]); + if (refLookupError == GIT_ENOTFOUND || refLookupError == GIT_EINVALIDSPEC) { + continue; + } + else if (refLookupError != GIT_OK) { + git_strarray_dispose(&ref_list); + return refLookupError; + } + + // obtain peeled oid of the reference + bool found_oid = false; + git_oid oid_ref; + switch (git_reference_type(ref)) + { + case GIT_REFERENCE_DIRECT: + git_oid_cpy(&oid_ref, git_reference_target(ref)); + found_oid = true; + break; + + case GIT_REFERENCE_SYMBOLIC: + { + git_reference *ref_resolved {nullptr}; + if ((errorCode = git_reference_resolve(&ref_resolved, ref)) != GIT_OK) { + git_reference_free(ref); + git_strarray_dispose(&ref_list); + return errorCode; + } + git_oid_cpy(&oid_ref, git_reference_target(ref_resolved)); + found_oid = true; + git_reference_free(ref_resolved); + } + break; + + default: + break; + } + + // store object's oid and type + if (found_oid) + { + git_object *target {nullptr}; + if ((errorCode = git_object_lookup(&target, m_repo, &oid_ref, GIT_OBJECT_ANY)) != GIT_OK) { + git_reference_free(ref); + git_strarray_dispose(&ref_list); + return errorCode; + } + + m_peeledRefs.emplace(std::make_pair( + std::string(reinterpret_cast(oid_ref.id), GIT_OID_RAWSZ), + git_object_type(target))); + + git_object_free(target); + } + git_reference_free(ref); + } + git_strarray_dispose(&ref_list); + + return errorCode; +} + +/** + * RepoAnalysis::setObjectsReachability + * Leverages threads via a worker pool to + * set reachability from tags, commits, and trees. 
+ * NOTE: the worker pool leveraged in this method runs at a different stage than the + * worker pool leveraged in previous stages, meaning they do not run at the same time, hence + * access to 'm_odbObjectsData->....info' won't suffer from a data race. + * NOTE: performance didn't improve leveraging threads for adding objects to unreachables container. + * \return false if the workerPool finished with errors; true otherwise + */ +bool RepoAnalysis::setObjectsReachability() +{ + // references are not objects, hence they won't be sent to the worker threads + setReachabilityFromRefs(); + + const unsigned int numThreads = + std::max(std::thread::hardware_concurrency(), static_cast(kMinThreads)); + std::vector< std::shared_ptr > workers {}; + for (unsigned int i = 0; i < numThreads; ++i) { + workers.emplace_back(std::make_shared(&m_odbObjectsData)); + } + + // initialize worker pool + WorkerPool workerPool {}; + workerPool.Init(workers); + + // NOTE: avoid queueing same type of objects in a row, so that different mutex can be used concurrently + uint8_t workInserted {0}; + OdbObjectsData::iterTagInfo itTagInfo = m_odbObjectsData.tags.info.begin(); + OdbObjectsData::iterCommitInfo itCommitInfo = m_odbObjectsData.commits.info.begin(); + OdbObjectsData::iterTreeInfo itTreeInfo = m_odbObjectsData.trees.info.begin(); + do { + workInserted = 0; + // insert tag + if (itTagInfo != m_odbObjectsData.tags.info.end()) { + workerPool.InsertWork(std::make_unique(&itTagInfo->second, GIT_OBJECT_TAG)); + ++itTagInfo; + ++workInserted; + } + // insert commmit + if (itCommitInfo != m_odbObjectsData.commits.info.end()) { + workerPool.InsertWork(std::make_unique(&itCommitInfo->second, GIT_OBJECT_COMMIT)); + ++itCommitInfo; + ++workInserted; + } + // insert tree + if (itTreeInfo != m_odbObjectsData.trees.info.end()) { + workerPool.InsertWork(std::make_unique(&itTreeInfo->second, GIT_OBJECT_TREE)); + ++itTreeInfo; + ++workInserted; + } + // blobs do not reach to any other object, hence no 
need to process them + } while (workInserted); + + // wait for the threads to finish and shutdown the work pool + workerPool.Shutdown(); + + // check there were no problems during execution + if (workerPool.Status() != WPStatus::kOk) { + return false; + } + + setUnreachables(); + + return true; +} + +/** + * RepoAnalysis::setReachabilityFromRefs + * Adds reachability counter where peeled refs point (normally a commit or a tag). + */ +void RepoAnalysis::setReachabilityFromRefs() +{ + for (const auto &ref : m_peeledRefs) { + switch (ref.second) { + case GIT_OBJECT_COMMIT: + { + OdbObjectsData::iterCommitInfo itCommitInfo = + m_odbObjectsData.commits.info.find(ref.first); + + if (itCommitInfo != m_odbObjectsData.commits.info.end()) { + ++itCommitInfo->second.reachability; + } + } + break; + case GIT_OBJECT_TREE: + { + OdbObjectsData::iterTreeInfo itTreeInfo = + m_odbObjectsData.trees.info.find(ref.first); + + if (itTreeInfo != m_odbObjectsData.trees.info.end()) { + ++itTreeInfo->second.reachability; + } + } + break; + case GIT_OBJECT_BLOB: + { + OdbObjectsData::iterBlobInfo itBlobInfo = + m_odbObjectsData.blobs.info.find(ref.first); + + if (itBlobInfo != m_odbObjectsData.blobs.info.end()) { + ++itBlobInfo->second.reachability; + } + } + break; + case GIT_OBJECT_TAG: + { + OdbObjectsData::iterTagInfo itTagInfo = + m_odbObjectsData.tags.info.find(ref.first); + + if (itTagInfo != m_odbObjectsData.tags.info.end()) { + ++itTagInfo->second.reachability; + } + } + break; + default: + break; + } + } +} + +/** + * RepoAnalysis::setUnreachables + * After setting reachability, we add the unreached objects to their unreachables container. 
+ */ +void RepoAnalysis::setUnreachables() +{ + for (const auto &tag : m_odbObjectsData.tags.info) { + if (!tag.second.reachability) { + m_odbObjectsData.tags.unreachables.emplace(tag.first); + } + } + for (const auto &commit : m_odbObjectsData.commits.info) { + if (!commit.second.reachability) { + m_odbObjectsData.commits.unreachables.emplace(commit.first); + } + } + for (const auto &tree : m_odbObjectsData.trees.info) { + if (!tree.second.reachability) { + m_odbObjectsData.trees.unreachables.emplace(tree.first); + } + } + for (const auto &blob : m_odbObjectsData.blobs.info) { + if (!blob.second.reachability) { + m_odbObjectsData.blobs.unreachables.emplace(blob.first); + } + } +} + +/** + * RepoAnalysis::pruneUnreachables + * Removes from their containers the unreachable objects. + * Decreases reachability of the objects they can reach. + */ +void RepoAnalysis::pruneUnreachables() +{ + // NOTE: order is important here, since each method prunes its own objects, but + // only decreases reachability of the objects connected to it; and those + // connected objects will be checked and pruned afterwards. + pruneUnreachableTags(); + pruneUnreachableCommits(); + pruneUnreachableTrees(); + pruneUnreachableBlobs(); +} + +/** + * RepoAnalysis::pruneUnreachableTags + * Prune tags and their chained tags if they become unreachable. + * Also decreases reachability of targets. 
+ */ +void RepoAnalysis::pruneUnreachableTags() +{ + while (!m_odbObjectsData.tags.unreachables.empty()) { + std::unordered_set newUnreachables {}; + + // erase unreachable tags + for (OdbObjectsData::iterUnreachable itTagUnrch = m_odbObjectsData.tags.unreachables.begin(); + itTagUnrch != m_odbObjectsData.tags.unreachables.end(); ++itTagUnrch) + { + OdbObjectsData::iterTagInfo itTagInfo = m_odbObjectsData.tags.info.find(*itTagUnrch); + + if (itTagInfo != m_odbObjectsData.tags.info.end()) { + const std::string &oidTarget = itTagInfo->second.oidTarget; + switch (itTagInfo->second.typeTarget) { + case GIT_OBJECT_TAG: + { + // if target is another tag, add it to newUnreachables + OdbObjectsData::iterTagInfo itTargetTagInfo = m_odbObjectsData.tags.info.find(oidTarget); + if (itTargetTagInfo != m_odbObjectsData.tags.info.end()) { + if (--itTargetTagInfo->second.reachability == OdbObjectsData::kUnreachable) { + newUnreachables.emplace(itTargetTagInfo->first); + } + } + } + break; + case GIT_OBJECT_COMMIT: + { + OdbObjectsData::iterCommitInfo itCommitInfo = m_odbObjectsData.commits.info.find(oidTarget); + if (itCommitInfo != m_odbObjectsData.commits.info.end()) { + if (--itCommitInfo->second.reachability == OdbObjectsData::kUnreachable) { + m_odbObjectsData.commits.unreachables.emplace(itCommitInfo->first); + } + } + } + break; + case GIT_OBJECT_TREE: + { + OdbObjectsData::iterTreeInfo itTreeInfo = m_odbObjectsData.trees.info.find(oidTarget); + if (itTreeInfo != m_odbObjectsData.trees.info.end()) { + if (--itTreeInfo->second.reachability == OdbObjectsData::kUnreachable) { + m_odbObjectsData.trees.unreachables.emplace(itTreeInfo->first); + } + } + } + break; + case GIT_OBJECT_BLOB: + { + OdbObjectsData::iterBlobInfo itBlobInfo = m_odbObjectsData.blobs.info.find(oidTarget); + if (itBlobInfo != m_odbObjectsData.blobs.info.end()) { + if (--itBlobInfo->second.reachability == OdbObjectsData::kUnreachable) { + m_odbObjectsData.blobs.unreachables.emplace(itBlobInfo->first); + } + 
} + } + break; + default: + break; + } + // erase tag from the tag's container + m_odbObjectsData.tags.info.erase(itTagInfo); + } + } + // set new unreachable tags + m_odbObjectsData.tags.unreachables = std::move(newUnreachables); + } +} + +/** + * RepoAnalysis::pruneUnreachableCommits + * Prune commits and decrease reachability of their associated trees. + */ +void RepoAnalysis::pruneUnreachableCommits() +{ + while (!m_odbObjectsData.commits.unreachables.empty()) { + std::unordered_set newUnreachables {}; + + // erase unreachable commits + for (OdbObjectsData::iterUnreachable itCommitUnrch = m_odbObjectsData.commits.unreachables.begin(); + itCommitUnrch != m_odbObjectsData.commits.unreachables.end(); ++itCommitUnrch) + { + OdbObjectsData::iterCommitInfo itCommitInfo = m_odbObjectsData.commits.info.find(*itCommitUnrch); + + if (itCommitInfo != m_odbObjectsData.commits.info.end()) + { + // decrease commit's parents reachability and add them as newUnreachable + const size_t numParents = itCommitInfo->second.parents.size(); + for (size_t i = 0; i < numParents; ++i) { + OdbObjectsData::iterCommitInfo itParentCommitInfo = + m_odbObjectsData.commits.info.find(itCommitInfo->second.parents.at(i)); + + if (itParentCommitInfo != m_odbObjectsData.commits.info.end()) { + if (--itParentCommitInfo->second.reachability == OdbObjectsData::kUnreachable) { + newUnreachables.emplace(itParentCommitInfo->first); + } + } + } + // decrease reachability of the commit's tree + OdbObjectsData::iterTreeInfo itTreeInfo = + m_odbObjectsData.trees.info.find(itCommitInfo->second.oidTree); + if (itTreeInfo != m_odbObjectsData.trees.info.end()) { + if (--itTreeInfo->second.reachability == OdbObjectsData::kUnreachable) { + m_odbObjectsData.trees.unreachables.emplace(itTreeInfo->first); + } + } + // erase commit from the commit's container + m_odbObjectsData.commits.info.erase(itCommitInfo); + } + } + // set new unreachable commits + m_odbObjectsData.commits.unreachables = 
std::move(newUnreachables); + } +} + +/** + * RepoAnalysis::pruneUnreachableTrees + * Prune unreachable trees and decrement reachability of their entries. + */ +void RepoAnalysis::pruneUnreachableTrees() +{ + while (!m_odbObjectsData.trees.unreachables.empty()) { + std::unordered_set newUnreachables {}; + + // erase unreachable trees + for (OdbObjectsData::iterUnreachable itTreeUnrch = m_odbObjectsData.trees.unreachables.begin(); + itTreeUnrch != m_odbObjectsData.trees.unreachables.end(); ++itTreeUnrch) + { + OdbObjectsData::iterTreeInfo itTreeInfo = m_odbObjectsData.trees.info.find(*itTreeUnrch); + + if (itTreeInfo != m_odbObjectsData.trees.info.end()) { + // decrease reachability of the entry blobs + for (auto &blob : itTreeInfo->second.entryBlobs) { + OdbObjectsData::iterBlobInfo itEntryBlobInfo = m_odbObjectsData.blobs.info.find(blob); + if (itEntryBlobInfo != m_odbObjectsData.blobs.info.end()) { + if (--itEntryBlobInfo->second.reachability == OdbObjectsData::kUnreachable) { + m_odbObjectsData.blobs.unreachables.emplace(blob); + } + } + } + // decrease reachability of the entry trees and add them as newUnreachables + for (auto &treeNameLen : itTreeInfo->second.entryTreesNameLen) { + OdbObjectsData::iterTreeInfo itEntryTreeInfo = + m_odbObjectsData.trees.info.find(treeNameLen.first); + if (itEntryTreeInfo != m_odbObjectsData.trees.info.end()) { + if (--itEntryTreeInfo->second.reachability == OdbObjectsData::kUnreachable) { + newUnreachables.emplace(treeNameLen.first); + } + } + } + // erase tree from the tree's container + m_odbObjectsData.trees.info.erase(itTreeInfo); + } + } + // set new unreachable trees + m_odbObjectsData.trees.unreachables = std::move(newUnreachables); + } +} + +/** + * RepoAnalysis::pruneUnreachableBlobs + * Rremoves unreachable blobs from their container. 
+ */ +void RepoAnalysis::pruneUnreachableBlobs() +{ + for (OdbObjectsData::iterUnreachable itBlobUnrch = m_odbObjectsData.blobs.unreachables.begin(); + itBlobUnrch != m_odbObjectsData.blobs.unreachables.end(); ++itBlobUnrch) + { + m_odbObjectsData.blobs.info.erase(*itBlobUnrch); + } +} + +/** + * RepoAnalysis::statsCountAndMax + * Statistics for repositorySize (count objects) and biggestObjects (get maximum of them). + * Also builds the commits graph. + * NOTE: better results achieved not leveraging threads. + */ +void RepoAnalysis::statsCountAndMax() +{ + // commits + for (auto &info : m_odbObjectsData.commits.info) { + OdbObjectsData::CommitInfo &commitInfo = info.second; + const size_t objectSize = commitInfo.size; + + m_odbObjectsData.commits.totalSize += objectSize; + m_odbObjectsData.commits.maxSize = std::max(m_odbObjectsData.commits.maxSize, objectSize); + m_odbObjectsData.commits.maxParents = std::max( + m_odbObjectsData.commits.maxParents, commitInfo.parents.size()); + + // build commit's graph + m_odbObjectsData.commits.graph.AddNode(info.first, commitInfo.parents); + } + // trees + for (auto &info : m_odbObjectsData.trees.info) { + OdbObjectsData::TreeInfoAndStats &treeInfo = info.second; + const size_t numEntries = treeInfo.numEntries; + const size_t objectSize = treeInfo.size; + + m_odbObjectsData.trees.totalSize += objectSize; + m_odbObjectsData.trees.totalEntries += numEntries; + m_odbObjectsData.trees.maxEntries = std::max(m_odbObjectsData.trees.maxEntries, numEntries); + } + // blobs + for (auto &info : m_odbObjectsData.blobs.info) { + OdbObjectsData::BlobInfo &blobInfo = info.second; + const size_t objectSize = blobInfo.size; + + m_odbObjectsData.blobs.totalSize += objectSize; + m_odbObjectsData.blobs.maxSize = std::max(m_odbObjectsData.blobs.maxSize, objectSize); + } + // no need to process tags here (we already have the count) +} + +/** + * RepoAnalysis::statsHistoryAndBiggestCheckouts + * Statistics for historyStructure and biggestCheckouts. 
 * \return true if success; false if something went wrong.
 */
bool RepoAnalysis::statsHistoryAndBiggestCheckouts()
{
  if (!calculateBiggestCheckouts()) {
    return false;
  }

  if (!calculateMaxTagDepth()) {
    return false;
  }

  // calculate max commit history depth
  m_statistics.historyStructure.maxDepth = m_odbObjectsData.commits.graph.CalculateMaxDepth();

  return true;
}

/**
 * RepoAnalysis::calculateBiggestCheckouts
 *
 * Once threads have collected data from objects and unreachable objects
 * have been pruned, biggest checkouts can be calculated.
 * Threads have already collected partial non-recursive tree statistics.
 * Keeps, per metric, the maximum value over every commit's checkout.
 * \return true if success; false if something went wrong.
 */
bool RepoAnalysis::calculateBiggestCheckouts()
{
  for (auto &commitInfo : m_odbObjectsData.commits.info)
  {
    // calculate this commit's data (recursive tree stats, memoized)
    const std::string &commitOidTree = commitInfo.second.oidTree;

    OdbObjectsData::iterTreeInfo itTreeInfo {};
    if ((itTreeInfo = calculateTreeStatistics(commitOidTree)) == m_odbObjectsData.trees.info.end()) {
      return false;
    }

    // update biggestCheckouts data
    OdbObjectsData::TreeInfoAndStats &treeInfoAndStats = itTreeInfo->second;
    m_statistics.biggestCheckouts.numDirectories = std::max(
      m_statistics.biggestCheckouts.numDirectories, treeInfoAndStats.stats.numDirectories);
    m_statistics.biggestCheckouts.totalFileSize = std::max(
      m_statistics.biggestCheckouts.totalFileSize, treeInfoAndStats.stats.totalFileSize);
    m_statistics.biggestCheckouts.maxPathDepth = std::max(
      m_statistics.biggestCheckouts.maxPathDepth, treeInfoAndStats.stats.maxPathDepth);
    m_statistics.biggestCheckouts.numFiles = std::max(
      m_statistics.biggestCheckouts.numFiles, treeInfoAndStats.stats.numFiles);
    m_statistics.biggestCheckouts.maxPathLength = std::max(
      m_statistics.biggestCheckouts.maxPathLength, treeInfoAndStats.stats.maxPathLength);
    m_statistics.biggestCheckouts.numSymlinks = std::max(
      m_statistics.biggestCheckouts.numSymlinks, treeInfoAndStats.stats.numSymlinks);
    m_statistics.biggestCheckouts.numSubmodules = std::max(
      m_statistics.biggestCheckouts.numSubmodules, treeInfoAndStats.stats.numSubmodules);
  }

  return true;
}

/**
 * RepoAnalysis::calculateTreeStatistics
 *
 * Calculates tree statistics recursively, considering individual tree's statistics
 * have already been calculated.
 * The maximum number of recursive calls depends directly on the maximum path depth of
 * the repository. For instance, the linux repository has a maximum path depth of 13,
 * so it should be safe against stack overflow.
 * Results are memoized in-place via `statsDone`, so shared subtrees are only
 * computed once.
 * Returns an iterator to the tree info container, or to end if something went wrong.
 */
OdbObjectsData::iterTreeInfo RepoAnalysis::calculateTreeStatistics(const std::string &oidTree)
{
  OdbObjectsData::iterTreeInfo itTreeInfo = m_odbObjectsData.trees.info.find(oidTree);
  if (itTreeInfo == m_odbObjectsData.trees.info.end()) {
    return itTreeInfo;
  }

  OdbObjectsData::TreeInfoAndStats &treeInfoAndStats = itTreeInfo->second;

  // prune recursivity
  if (treeInfoAndStats.statsDone) {
    return itTreeInfo;
  }

  // count this tree itself as one directory / one path level
  ++treeInfoAndStats.stats.numDirectories;
  ++treeInfoAndStats.stats.maxPathDepth;
  // the following partial statistics have also been calculated in previous stage with threads:
  // - treeInfoAndStats.stats.numFiles
  // - treeInfoAndStats.stats.maxPathLength
  // - treeInfoAndStats.stats.numSymlinks
  // - treeInfoAndStats.stats.numSubmodules

  // totalFileSize
  OdbObjectsData::iterBlobInfo itBlobInfo {};
  for (auto &oidBlob : treeInfoAndStats.entryBlobs)
  {
    if ((itBlobInfo = m_odbObjectsData.blobs.info.find(oidBlob)) == m_odbObjectsData.blobs.info.end()) {
      return m_odbObjectsData.trees.info.end(); // to let the caller know that something went wrong
    }

    treeInfoAndStats.stats.totalFileSize += itBlobInfo->second.size;
  }

  // recursively into subtrees
  for (const auto &subTreeNameLen : treeInfoAndStats.entryTreesNameLen)
  {
    OdbObjectsData::iterTreeInfo itSubTreeInfo {};
    if ((itSubTreeInfo = calculateTreeStatistics(subTreeNameLen.first)) ==
      m_odbObjectsData.trees.info.end()) {
      return itSubTreeInfo;
    }

    // fold the subtree's (already memoized) stats into this tree's stats;
    // `+ 1` accounts for the extra path level / the path separator + name
    OdbObjectsData::TreeInfoAndStats &subTreeInfoAndStats = itSubTreeInfo->second;
    treeInfoAndStats.stats.numDirectories += subTreeInfoAndStats.stats.numDirectories;
    treeInfoAndStats.stats.maxPathDepth = std::max(treeInfoAndStats.stats.maxPathDepth,
      subTreeInfoAndStats.stats.maxPathDepth + 1);
    treeInfoAndStats.stats.maxPathLength = std::max(treeInfoAndStats.stats.maxPathLength,
      subTreeNameLen.second + 1 + subTreeInfoAndStats.stats.maxPathLength);
    treeInfoAndStats.stats.numFiles += subTreeInfoAndStats.stats.numFiles;
    treeInfoAndStats.stats.totalFileSize += subTreeInfoAndStats.stats.totalFileSize;
    treeInfoAndStats.stats.numSymlinks += subTreeInfoAndStats.stats.numSymlinks;
    treeInfoAndStats.stats.numSubmodules += subTreeInfoAndStats.stats.numSubmodules;
  }

  treeInfoAndStats.statsDone = true;

  return itTreeInfo;
}

/**
 * RepoAnalysis::calculateMaxTagDepth
 * \return true if success; false if something went wrong.
 */
bool RepoAnalysis::calculateMaxTagDepth()
{
  for (auto &tag : m_odbObjectsData.tags.info)
  {
    OdbObjectsData::iterTagInfo itTagInfo {};
    if ((itTagInfo = calculateTagDepth(tag.first)) == m_odbObjectsData.tags.info.end()) {
      return false;
    }

    // update maxTagDepth
    OdbObjectsData::TagInfo &tagInfo = itTagInfo->second;
    m_statistics.historyStructure.maxTagDepth = std::max(
      m_statistics.historyStructure.maxTagDepth, tagInfo.depth);
  }

  return true;
}

/**
 * RepoAnalysis::calculateTagDepth
 *
 * Calculates recursively the tag depth of the oidTag passed as a parameter.
 * Returns an iterator to the tag info container, or to end if something went wrong.
+ */ +OdbObjectsData::iterTagInfo RepoAnalysis::calculateTagDepth(const std::string &oidTag) +{ + OdbObjectsData::iterTagInfo itTagInfo = m_odbObjectsData.tags.info.find(oidTag); + if (itTagInfo == m_odbObjectsData.tags.info.end()) { + return itTagInfo; + } + + OdbObjectsData::TagInfo &tagInfo = itTagInfo->second; + + // prune recursivity + if (tagInfo.depth != OdbObjectsData::TagInfo::kUnsetDepth) { + return itTagInfo; + } + + ++tagInfo.depth; + + if (tagInfo.typeTarget == GIT_OBJECT_TAG) + { + OdbObjectsData::iterTagInfo itChainedTagInfo {}; + if ((itChainedTagInfo = calculateTagDepth(tagInfo.oidTarget)) == m_odbObjectsData.tags.info.end()) { + return itChainedTagInfo; + } + + OdbObjectsData::TagInfo &chainedTagInfo = itChainedTagInfo->second; + tagInfo.depth += chainedTagInfo.depth; + } + + return itTagInfo; +} + +/** + * RepoAnalysis::fillOutStatistics + */ +void RepoAnalysis::fillOutStatistics() +{ + m_statistics.repositorySize.commits.count = m_odbObjectsData.commits.info.size(); + m_statistics.repositorySize.commits.size = m_odbObjectsData.commits.totalSize; + m_statistics.repositorySize.trees.count = m_odbObjectsData.trees.info.size(); + m_statistics.repositorySize.trees.size = m_odbObjectsData.trees.totalSize; + m_statistics.repositorySize.trees.entries = m_odbObjectsData.trees.totalEntries; + m_statistics.repositorySize.blobs.count = m_odbObjectsData.blobs.info.size(); + m_statistics.repositorySize.blobs.size = m_odbObjectsData.blobs.totalSize; + m_statistics.repositorySize.annotatedTags.count = m_odbObjectsData.tags.info.size(); + + m_statistics.biggestObjects.commits.maxSize = m_odbObjectsData.commits.maxSize; + m_statistics.biggestObjects.commits.maxParents = m_odbObjectsData.commits.maxParents; + m_statistics.biggestObjects.trees.maxEntries = m_odbObjectsData.trees.maxEntries; + m_statistics.biggestObjects.blobs.maxSize = m_odbObjectsData.blobs.maxSize; + + // m_statistics.biggestCheckouts have already been filled out while running +} + +/** + * 
RepoAnalysis::repositorySizeToJS + */ +v8::Local RepoAnalysis::repositorySizeToJS() const +{ + v8::Local commits = Nan::New(); + Nan::Set(commits, Nan::New("count").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.commits.count)); + Nan::Set(commits, Nan::New("size").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.commits.size)); + + v8::Local trees = Nan::New(); + Nan::Set(trees, Nan::New("count").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.trees.count)); + Nan::Set(trees, Nan::New("size").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.trees.size)); + Nan::Set(trees, Nan::New("entries").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.trees.entries)); + + v8::Local blobs = Nan::New(); + Nan::Set(blobs, Nan::New("count").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.blobs.count)); + Nan::Set(blobs, Nan::New("size").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.blobs.size)); + + v8::Local annotatedTags = Nan::New(); + Nan::Set(annotatedTags, Nan::New("count").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.annotatedTags.count)); + + v8::Local references = Nan::New(); + Nan::Set(references, Nan::New("count").ToLocalChecked(), + Nan::New(m_statistics.repositorySize.references.count)); + + v8::Local result = Nan::New(); + Nan::Set(result, Nan::New("commits").ToLocalChecked(), commits); + Nan::Set(result, Nan::New("trees").ToLocalChecked(), trees); + Nan::Set(result, Nan::New("blobs").ToLocalChecked(), blobs); + Nan::Set(result, Nan::New("annotatedTags").ToLocalChecked(), annotatedTags); + Nan::Set(result, Nan::New("references").ToLocalChecked(), references); + + return result; +} + +/** + * RepoAnalysis::biggestObjectsToJS + */ +v8::Local RepoAnalysis::biggestObjectsToJS() const +{ + v8::Local commits = Nan::New(); + Nan::Set(commits, Nan::New("maxSize").ToLocalChecked(), + Nan::New(m_statistics.biggestObjects.commits.maxSize)); + Nan::Set(commits, Nan::New("maxParents").ToLocalChecked(), + 
Nan::New(m_statistics.biggestObjects.commits.maxParents)); + + v8::Local trees = Nan::New(); + Nan::Set(trees, Nan::New("maxEntries").ToLocalChecked(), + Nan::New(m_statistics.biggestObjects.trees.maxEntries)); + + v8::Local blobs = Nan::New(); + Nan::Set(blobs, Nan::New("maxSize").ToLocalChecked(), + Nan::New(m_statistics.biggestObjects.blobs.maxSize)); + + v8::Local result = Nan::New(); + Nan::Set(result, Nan::New("commits").ToLocalChecked(), commits); + Nan::Set(result, Nan::New("trees").ToLocalChecked(), trees); + Nan::Set(result, Nan::New("blobs").ToLocalChecked(), blobs); + + return result; +} + +/** + * RepoAnalysis::historyStructureToJS + */ +v8::Local RepoAnalysis::historyStructureToJS() const +{ + v8::Local result = Nan::New(); + Nan::Set(result, Nan::New("maxDepth").ToLocalChecked(), + Nan::New(m_statistics.historyStructure.maxDepth)); + Nan::Set(result, Nan::New("maxTagDepth").ToLocalChecked(), + Nan::New(m_statistics.historyStructure.maxTagDepth)); + + return result; +} + +/** + * RepoAnalysis::biggestCheckoutsToJS + */ +v8::Local RepoAnalysis::biggestCheckoutsToJS() const +{ + v8::Local result = Nan::New(); + Nan::Set(result, Nan::New("numDirectories").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.numDirectories)); + Nan::Set(result, Nan::New("maxPathDepth").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.maxPathDepth)); + Nan::Set(result, Nan::New("maxPathLength").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.maxPathLength)); + Nan::Set(result, Nan::New("numFiles").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.numFiles)); + Nan::Set(result, Nan::New("totalFileSize").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.totalFileSize)); + Nan::Set(result, Nan::New("numSymlinks").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.numSymlinks)); + Nan::Set(result, Nan::New("numSubmodules").ToLocalChecked(), + Nan::New(m_statistics.biggestCheckouts.numSubmodules)); + + return result; +} + 
+NAN_METHOD(GitRepository::Statistics) +{ + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + StatisticsBaton* baton = new StatisticsBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->repo = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + baton->out = static_cast(new RepoAnalysis(baton->repo)); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + StatisticsWorker *worker = new StatisticsWorker(baton, callback, cleanupHandles); + worker->Reference("repo", info.This()); + nodegit::Context *nodegitContext = + reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster GitRepository::StatisticsWorker::AcquireLocks() +{ + nodegit::LockMaster lockMaster(true, baton->repo); + + return lockMaster; +} + +void GitRepository::StatisticsWorker::Execute() +{ + git_error_clear(); + + RepoAnalysis *repoAnalysis = static_cast(baton->out); + if ((baton->error_code = repoAnalysis->Analyze()) != GIT_OK) + { + if (git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } + + delete repoAnalysis; + baton->out = nullptr; + } +} + +void GitRepository::StatisticsWorker::HandleErrorCallback() +{ + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + RepoAnalysis *repoAnalysis = static_cast(baton->out); + if (repoAnalysis) { + delete repoAnalysis; + } + + delete baton; +} + +void GitRepository::StatisticsWorker::HandleOKCallback() +{ + if (baton->out != NULL) + { + RepoAnalysis *repoAnalysis = static_cast(baton->out); + Local result = repoAnalysis->StatisticsToJS(); + + delete repoAnalysis; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } + else if (baton->error) + { + Local err; + + if 
(baton->error->message) { + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); + } else { + err = Nan::To(Nan::Error("Method statistics has thrown an error.")).ToLocalChecked(); + } + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("GitRepository.statistics").ToLocalChecked()); + Local argv[1] = { + err + }; + + callback->Call(1, argv, async_resource); + + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + Local err = Nan::To(Nan::Error("Method statistics has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("GitRepository.statistics").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + else + { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} \ No newline at end of file diff --git a/generate/templates/manual/revwalk/commit_walk.cc b/generate/templates/manual/revwalk/commit_walk.cc new file mode 100644 index 0000000000..4fe60de9e1 --- /dev/null +++ b/generate/templates/manual/revwalk/commit_walk.cc @@ -0,0 +1,275 @@ +#define SET_ON_OBJECT(obj, field, data) Nan::Set(obj, Nan::New(field).ToLocalChecked(), data) + +v8::Local signatureToJavascript(const git_signature *signature) { + v8::Local signatureObject = Nan::New(); + SET_ON_OBJECT(signatureObject, "name", Nan::New(signature->name).ToLocalChecked()); + SET_ON_OBJECT(signatureObject, "email", Nan::New(signature->email).ToLocalChecked()); + SET_ON_OBJECT(signatureObject, "date", Nan::New(signature->when.time * 1000)); + std::stringstream fullSignature; + fullSignature << signature->name << " <" << signature->email << ">"; + SET_ON_OBJECT(signatureObject, "full", Nan::New(fullSignature.str()).ToLocalChecked());
+ return signatureObject; +} + +#include +class CommitModel { +public: + CommitModel(git_commit *commit, bool fetchSignature): + commit(commit), + fetchSignature(fetchSignature), + signature({ 0, 0, 0 }), + signedData({ 0, 0, 0 }) + { + if (fetchSignature) { + const int error = git_commit_extract_signature( + &signature, + &signedData, + git_commit_owner(commit), + const_cast(git_commit_id(commit)), + NULL + ); + if (error != GIT_ENOTFOUND) { + assert(error == GIT_OK); + } + } + + const size_t parentCount = git_commit_parentcount(commit); + parentIds.reserve(parentCount); + for (size_t parentIndex = 0; parentIndex < parentCount; ++parentIndex) { + parentIds.push_back(git_oid_tostr_s(git_commit_parent_id(commit, parentIndex))); + } + } + + CommitModel(const CommitModel &) = delete; + CommitModel(CommitModel &&) = delete; + CommitModel &operator=(const CommitModel &) = delete; + CommitModel &operator=(CommitModel &&) = delete; + + v8::Local toJavascript() { + if (!fetchSignature) { + v8::Local commitObject = GitCommit::New( + commit, + true, + Nan::To(GitRepository::New( + git_commit_owner(commit), + true + )).ToLocalChecked() + ); + commit = NULL; + return commitObject; + } + + v8::Local commitModel = Nan::New(); + SET_ON_OBJECT(commitModel, "sha", Nan::New(git_oid_tostr_s(git_commit_id(commit))).ToLocalChecked()); + SET_ON_OBJECT(commitModel, "message", Nan::New(git_commit_message(commit)).ToLocalChecked()); + SET_ON_OBJECT(commitModel, "author", signatureToJavascript(git_commit_author(commit))); + SET_ON_OBJECT(commitModel, "committer", signatureToJavascript(git_commit_committer(commit))); + + size_t parentCount = parentIds.size(); + v8::Local parents = Nan::New(parentCount); + for (size_t parentIndex = 0; parentIndex < parentCount; ++parentIndex) { + Nan::Set(parents, Nan::New(parentIndex), Nan::New(parentIds[parentIndex]).ToLocalChecked()); + } + SET_ON_OBJECT(commitModel, "parents", parents); + + if (signature.size != 0 || signedData.size != 0) { + v8::Local 
gpgSignature = Nan::New(); + if (signature.size != 0) { + SET_ON_OBJECT(gpgSignature, "signature", Nan::New(signature.ptr).ToLocalChecked()); + } else { + SET_ON_OBJECT(gpgSignature, "signature", Nan::Null()); + } + + if (signedData.size != 0) { + SET_ON_OBJECT(gpgSignature, "signedData", Nan::New(signedData.ptr).ToLocalChecked()); + } else { + SET_ON_OBJECT(gpgSignature, "signedData", Nan::Null()); + } + + SET_ON_OBJECT(commitModel, "gpgSignature", gpgSignature); + } + + return commitModel; + } + + ~CommitModel() { + git_buf_dispose(&signature); + git_buf_dispose(&signedData); + if (commit) { + git_commit_free(commit); + } + } + +private: + git_commit *commit; + bool fetchSignature; + git_buf signature, signedData; + std::vector parentIds; +}; + +NAN_METHOD(GitRevwalk::CommitWalk) { + if (info.Length() == 0 || !info[0]->IsNumber()) { + return Nan::ThrowError("Max count is required and must be a number."); + } + + if (info.Length() >= 3 && !info[1]->IsNull() && !info[1]->IsUndefined() && !info[1]->IsObject()) { + return Nan::ThrowError("Options must be an object, null, or undefined."); + } + + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + CommitWalkBaton* baton = new CommitWalkBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->max_count = Nan::To(info[0]).FromJust(); + std::vector *out = new std::vector; + out->reserve(baton->max_count); + baton->out = static_cast(out); + if (info.Length() == 3 && info[1]->IsObject()) { + v8::Local options = Nan::To(info[1]).ToLocalChecked(); + v8::Local propName = Nan::New("returnPlainObjects").ToLocalChecked(); + if (Nan::Has(options, propName).FromJust()) { + baton->returnPlainObjects = Nan::Get(options, propName).ToLocalChecked()->IsTrue(); + } else { + baton->returnPlainObjects = false; + } + } else { + baton->returnPlainObjects = false; + } + baton->walk = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + Nan::Callback 
*callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + CommitWalkWorker *worker = new CommitWalkWorker(baton, callback, cleanupHandles); + worker->Reference("commitWalk", info.This()); + + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + return; +} + +nodegit::LockMaster GitRevwalk::CommitWalkWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true); + return lockMaster; +} + +void GitRevwalk::CommitWalkWorker::Execute() { + giterr_clear(); + + std::vector *out = static_cast *>(baton->out); + for (int i = 0; i < baton->max_count; i++) { + git_oid next_commit_id; + baton->error_code = git_revwalk_next(&next_commit_id, baton->walk); + + if (baton->error_code == GIT_ITEROVER) { + baton->error_code = GIT_OK; + return; + } + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + while (out->size()) { + delete out->back(); + out->pop_back(); + } + + delete out; + baton->out = NULL; + + return; + } + + git_commit *commit; + baton->error_code = git_commit_lookup(&commit, git_revwalk_repository(baton->walk), &next_commit_id); + + if (baton->error_code != GIT_OK) { + if (giterr_last() != NULL) { + baton->error = git_error_dup(giterr_last()); + } + + while (out->size()) { + delete out->back(); + out->pop_back(); + } + + delete out; + baton->out = NULL; + + return; + } + + out->push_back(new CommitModel(commit, baton->returnPlainObjects)); + } +} + +void GitRevwalk::CommitWalkWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + auto out = static_cast *>(baton->out); + while (out->size()) { + delete out->back(); + out->pop_back(); + } + + delete out; + + delete baton; +} + +void GitRevwalk::CommitWalkWorker::HandleOKCallback() { + if (baton->out != NULL) { + std::vector *out = 
static_cast *>(baton->out); + const unsigned int size = out->size(); + Local result = Nan::New(size); + for (unsigned int i = 0; i < size; i++) { + CommitModel *commitModel = out->at(i); + Nan::Set( + result, + Nan::New(i), + commitModel->toJavascript() + ); + delete commitModel; + } + + delete out; + + Local argv[2] = { + Nan::Null(), + result + }; + callback->Call(2, argv, async_resource); + } else if (baton->error) { + Local argv[1] = { + Nan::Error(baton->error->message) + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } else if (baton->error_code < 0) { + Local err = Nan::To(Nan::Error("Revwalk commitWalk has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.commitWalk").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } else { + callback->Call(0, NULL, async_resource); + } + + delete baton; +} diff --git a/generate/templates/manual/revwalk/fast_walk.cc b/generate/templates/manual/revwalk/fast_walk.cc index 8969bcb0fa..ce2d05a2d3 100644 --- a/generate/templates/manual/revwalk/fast_walk.cc +++ b/generate/templates/manual/revwalk/fast_walk.cc @@ -4,11 +4,11 @@ NAN_METHOD(GitRevwalk::FastWalk) return Nan::ThrowError("Max count is required and must be a number."); } - if (info.Length() == 1 || !info[1]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - FastWalkBaton* baton = new FastWalkBaton; + FastWalkBaton* baton = new FastWalkBaton(); baton->error_code = GIT_OK; baton->error = NULL; @@ -17,20 +17,27 @@ NAN_METHOD(GitRevwalk::FastWalk) baton->out->reserve(baton->max_count); baton->walk = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); - Nan::Callback *callback = new 
Nan::Callback(Local::Cast(info[1])); - FastWalkWorker *worker = new FastWalkWorker(baton, callback); - worker->SaveToPersistent("fastWalk", info.This()); + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + FastWalkWorker *worker = new FastWalkWorker(baton, callback, cleanupHandles); + worker->Reference("fastWalk", info.This()); - Nan::AsyncQueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitRevwalk::FastWalkWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true); + return lockMaster; +} + void GitRevwalk::FastWalkWorker::Execute() { for (int i = 0; i < baton->max_count; i++) { git_oid *nextCommit = (git_oid *)malloc(sizeof(git_oid)); - giterr_clear(); + git_error_clear(); baton->error_code = git_revwalk_next(nextCommit, baton->walk); if (baton->error_code != GIT_OK) @@ -40,7 +47,7 @@ void GitRevwalk::FastWalkWorker::Execute() free(nextCommit); if (baton->error_code != GIT_ITEROVER) { - baton->error = git_error_dup(giterr_last()); + baton->error = git_error_dup(git_error_last()); while(!baton->out->empty()) { @@ -66,6 +73,25 @@ void GitRevwalk::FastWalkWorker::Execute() } } +void GitRevwalk::FastWalkWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + while(!baton->out->empty()) { + free(baton->out->back()); + baton->out->pop_back(); + } + + delete baton->out; + + delete baton; +} + void GitRevwalk::FastWalkWorker::HandleOKCallback() { if (baton->out != NULL) @@ -90,12 +116,12 @@ void GitRevwalk::FastWalkWorker::HandleOKCallback() { Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method fastWalk has thrown an 
error.")->ToObject(); + err = Nan::To(Nan::Error("Method fastWalk has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fastWalk").ToLocalChecked()); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fastWalk").ToLocalChecked()); Local argv[1] = { err }; @@ -109,58 +135,23 @@ void GitRevwalk::FastWalkWorker::HandleOKCallback() } else if (baton->error_code < 0) { - std::queue< Local > workerArguments; bool callbackFired = false; - - while(!workerArguments.empty()) - { - Local node = workerArguments.front(); - workerArguments.pop(); - - if ( - !node->IsObject() - || node->IsArray() - || node->IsBooleanObject() - || node->IsDate() - || node->IsFunction() - || node->IsNumberObject() - || node->IsRegExp() - || node->IsStringObject() - ) - { - continue; - } - - Local nodeObj = node->ToObject(); - Local checkValue = GetPrivate(nodeObj, Nan::New("NodeGitPromiseError").ToLocalChecked()); - - if (!checkValue.IsEmpty() && !checkValue->IsNull() && !checkValue->IsUndefined()) - { - Local argv[1] = { - checkValue->ToObject() + if (!callbackErrorHandle.IsEmpty()) { + v8::Local maybeError = Nan::New(callbackErrorHandle); + if (!maybeError->IsNull() && !maybeError->IsUndefined()) { + v8::Local argv[1] = { + maybeError }; callback->Call(1, argv, async_resource); callbackFired = true; - break; - } - - Local properties = nodeObj->GetPropertyNames(); - for (unsigned int propIndex = 0; propIndex < properties->Length(); ++propIndex) - { - Local propName = properties->Get(propIndex)->ToString(); - Local nodeToQueue = nodeObj->Get(propName); - if (!nodeToQueue->IsUndefined()) - { - workerArguments.push(nodeToQueue); - } } } if (!callbackFired) { - Local err = Nan::Error("Method next has thrown an error.")->ToObject(); - 
err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fastWalk").ToLocalChecked()); + Local err = Nan::To(Nan::Error("Method next has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fastWalk").ToLocalChecked()); Local argv[1] = { err }; @@ -172,4 +163,6 @@ void GitRevwalk::FastWalkWorker::HandleOKCallback() callback->Call(0, NULL, async_resource); } } + + delete baton; } diff --git a/generate/templates/manual/revwalk/file_history_walk.cc b/generate/templates/manual/revwalk/file_history_walk.cc index 557cb38bc9..569bb022dc 100644 --- a/generate/templates/manual/revwalk/file_history_walk.cc +++ b/generate/templates/manual/revwalk/file_history_walk.cc @@ -1,3 +1,196 @@ +// Note: commit is not owned by this class (must be freed elsewhere) +class FileHistoryEvent { +public: + FileHistoryEvent( + git_delta_t inputType, + bool inputExistsInCurrentTree, + bool inputIsMergeCommit, + git_commit *inputCommit, + const char *inputFrom, + const char *inputTo + ): + type(inputType), + existsInCurrentTree(inputExistsInCurrentTree), + isMergeCommit(inputIsMergeCommit), + from(inputFrom == NULL ? NULL : strdup(inputFrom)), + to(inputTo == NULL ? 
NULL : strdup(inputTo)), + commit(inputCommit) + { + if (inputCommit != NULL) { + const int error = git_commit_dup(&commit, inputCommit); + assert(error == GIT_OK); + } + } + + FileHistoryEvent(const FileHistoryEvent &) = delete; + FileHistoryEvent(FileHistoryEvent &&) = delete; + FileHistoryEvent &operator=(const FileHistoryEvent &) = delete; + FileHistoryEvent &operator=(FileHistoryEvent &&) = delete; + + ~FileHistoryEvent() { + if (commit != NULL) { + git_commit_free(commit); + } + + if(from != NULL) { + free((void *)from); + } + + if(to != NULL) { + free((void *)to); + } + } + + v8::Local toJavascript() { + v8::Local historyEntry = Nan::New(); + v8::Local owners = Nan::New(0); + Nan::Set( + owners, + Nan::New(owners->Length()), + Nan::To(GitRepository::New( + git_commit_owner(commit), + true + )).ToLocalChecked() + ); + Nan::Set(historyEntry, Nan::New("commit").ToLocalChecked(), GitCommit::New(commit, true, owners)); + commit = NULL; + Nan::Set(historyEntry, Nan::New("status").ToLocalChecked(), Nan::New(type)); + Nan::Set(historyEntry, Nan::New("isMergeCommit").ToLocalChecked(), Nan::New(isMergeCommit)); + if (type == GIT_DELTA_RENAMED) { + if (from != NULL) { + Nan::Set(historyEntry, Nan::New("oldName").ToLocalChecked(), Nan::New(from).ToLocalChecked()); + } + if (to != NULL) { + Nan::Set(historyEntry, Nan::New("newName").ToLocalChecked(), Nan::New(to).ToLocalChecked()); + } + } + return historyEntry; + } + + static int buildHistoryEvent( + FileHistoryEvent **fileHistoryEvent, + git_repository *repo, + git_commit *currentCommit, + git_tree *currentTree, + git_tree *parentTree, + const char *filePath + ) { + int errorCode; + git_tree_entry *currentEntry; + if (git_tree_entry_bypath(¤tEntry, currentTree, filePath) != GIT_OK) { + currentEntry = NULL; + } + git_tree_entry *parentEntry; + if (git_tree_entry_bypath(&parentEntry, parentTree, filePath) != GIT_OK) { + parentEntry = NULL; + } + + if (!currentEntry && !parentEntry) { + *fileHistoryEvent = new 
FileHistoryEvent(GIT_DELTA_UNMODIFIED, false, false, currentCommit, NULL, NULL); + return GIT_OK; + } + + // The filePath was added + if (currentEntry && !parentEntry) { + git_diff *diff; + if ((errorCode = git_diff_tree_to_tree(&diff, repo, parentTree, currentTree, NULL)) != GIT_OK) { + git_tree_entry_free(currentEntry); + return errorCode; + } + if ((errorCode = git_diff_find_similar(diff, NULL)) != GIT_OK) { + git_diff_free(diff); + git_tree_entry_free(currentEntry); + return errorCode; + } + const size_t numDeltas = git_diff_num_deltas(diff); + for (size_t i = 0; i < numDeltas; ++i) { + const git_diff_delta *delta = git_diff_get_delta(diff, i); + if (delta->new_file.path != NULL && std::strcmp(delta->new_file.path, filePath) == 0) { + if (delta->status == GIT_DELTA_RENAMED + || (delta->old_file.path != NULL && std::strcmp(delta->old_file.path, filePath) != 0)) { + *fileHistoryEvent = new FileHistoryEvent( + GIT_DELTA_RENAMED, + true, + false, + currentCommit, + delta->old_file.path, + delta->new_file.path + ); + git_diff_free(diff); + git_tree_entry_free(currentEntry); + return GIT_OK; + } + break; + } + } + git_diff_free(diff); + git_tree_entry_free(currentEntry); + + *fileHistoryEvent = new FileHistoryEvent(GIT_DELTA_ADDED, true, false, currentCommit, NULL, NULL); + return GIT_OK; + } + + // The filePath was deleted + if (!currentEntry && parentEntry) { + git_diff *diff; + if ((errorCode = git_diff_tree_to_tree(&diff, repo, parentTree, currentTree, NULL)) != GIT_OK) { + git_tree_entry_free(parentEntry); + return errorCode; + } + if ((errorCode = git_diff_find_similar(diff, NULL)) != GIT_OK) { + git_diff_free(diff); + git_tree_entry_free(parentEntry); + return errorCode; + } + const size_t numDeltas = git_diff_num_deltas(diff); + for (size_t i = 0; i < numDeltas; ++i) { + const git_diff_delta *delta = git_diff_get_delta(diff, i); + if (delta->old_file.path != NULL && std::strcmp(delta->old_file.path, filePath) == 0) { + if (delta->status == GIT_DELTA_RENAMED + 
|| (delta->new_file.path != NULL && std::strcmp(delta->new_file.path, filePath) != 0)) { + *fileHistoryEvent = new FileHistoryEvent( + GIT_DELTA_RENAMED, + false, + false, + currentCommit, + delta->old_file.path, + delta->new_file.path + ); + git_diff_free(diff); + git_tree_entry_free(parentEntry); + return GIT_OK; + } + break; + } + } + git_diff_free(diff); + git_tree_entry_free(parentEntry); + + *fileHistoryEvent = new FileHistoryEvent(GIT_DELTA_DELETED, false, false, currentCommit, NULL, NULL); + return GIT_OK; + } + + if (git_oid_cmp(git_tree_entry_id(currentEntry), git_tree_entry_id(parentEntry)) != 0 + || git_tree_entry_filemode(currentEntry) != git_tree_entry_filemode(parentEntry) + ) { + git_tree_entry_free(parentEntry); + git_tree_entry_free(currentEntry); + *fileHistoryEvent = new FileHistoryEvent(GIT_DELTA_MODIFIED, true, false, currentCommit, NULL, NULL); + return GIT_OK; + } + + *fileHistoryEvent = new FileHistoryEvent(GIT_DELTA_UNMODIFIED, true, false, currentCommit, NULL, NULL); + git_tree_entry_free(parentEntry); + git_tree_entry_free(currentEntry); + return GIT_OK; + } + + git_delta_t type; + bool existsInCurrentTree, isMergeCommit; + const char *from, *to; + git_commit *commit; +}; + NAN_METHOD(GitRevwalk::FileHistoryWalk) { if (info.Length() == 0 || !info[0]->IsString()) { @@ -8,277 +201,277 @@ NAN_METHOD(GitRevwalk::FileHistoryWalk) return Nan::ThrowError("Max count is required and must be a number."); } - if (info.Length() == 2 || !info[2]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - FileHistoryWalkBaton* baton = new FileHistoryWalkBaton; + FileHistoryWalkBaton* baton = new FileHistoryWalkBaton(); baton->error_code = GIT_OK; baton->error = NULL; - String::Utf8Value from_js_file_path(info[0]->ToString()); + Nan::Utf8String from_js_file_path(Nan::To(info[0]).ToLocalChecked()); baton->file_path = strdup(*from_js_file_path); baton->max_count = 
Nan::To(info[1]).FromJust(); - baton->out = new std::vector< std::pair > *>; + baton->out = new std::vector; baton->out->reserve(baton->max_count); baton->walk = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); - Nan::Callback *callback = new Nan::Callback(Local::Cast(info[2])); - FileHistoryWalkWorker *worker = new FileHistoryWalkWorker(baton, callback); - worker->SaveToPersistent("fileHistoryWalk", info.This()); + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + FileHistoryWalkWorker *worker = new FileHistoryWalkWorker(baton, callback, cleanupHandles); + worker->Reference("fileHistoryWalk", info.This()); - Nan::AsyncQueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitRevwalk::FileHistoryWalkWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true); + return lockMaster; +} + void GitRevwalk::FileHistoryWalkWorker::Execute() { git_repository *repo = git_revwalk_repository(baton->walk); - git_oid *nextOid = (git_oid *)malloc(sizeof(git_oid)); - giterr_clear(); + git_oid currentOid; + git_error_clear(); for ( - unsigned int i = 0; - i < baton->max_count && (baton->error_code = git_revwalk_next(nextOid, baton->walk)) == GIT_OK; - ++i + unsigned int revwalkIterations = 0; + revwalkIterations < baton->max_count && (baton->error_code = git_revwalk_next(¤tOid, baton->walk)) == GIT_OK; + ++revwalkIterations ) { - // check if this commit has the file - git_commit *nextCommit; - - if ((baton->error_code = git_commit_lookup(&nextCommit, repo, nextOid)) != GIT_OK) { + git_commit *currentCommit; + if ((baton->error_code = git_commit_lookup(¤tCommit, repo, ¤tOid)) != GIT_OK) { break; } - git_tree *thisTree, *parentTree; - if ((baton->error_code = git_commit_tree(&thisTree, nextCommit)) != GIT_OK) { - git_commit_free(nextCommit); + git_tree *currentTree; + if ((baton->error_code = 
git_commit_tree(¤tTree, currentCommit)) != GIT_OK) { + git_commit_free(currentCommit); break; } - git_diff *diffs; - git_diff_options opts = GIT_DIFF_OPTIONS_INIT; - char *file_path = strdup(baton->file_path); - opts.pathspec.strings = &file_path; - opts.pathspec.count = 1; - git_commit *parent; - unsigned int parents = git_commit_parentcount(nextCommit); - if (parents > 1) { - git_commit_free(nextCommit); - continue; - } else if (parents == 1) { - if ((baton->error_code = git_commit_parent(&parent, nextCommit, 0)) != GIT_OK) { - git_commit_free(nextCommit); - break; + const unsigned int parentCount = git_commit_parentcount(currentCommit); + if (parentCount == 0) { + git_tree_entry* entry; + if (git_tree_entry_bypath(&entry, currentTree, baton->file_path) == GIT_OK) { + baton->out->push_back(new FileHistoryEvent(GIT_DELTA_ADDED, false, false, currentCommit, NULL, NULL)); + git_tree_entry_free(entry); } - if ( - (baton->error_code = git_commit_tree(&parentTree, parent)) != GIT_OK || - (baton->error_code = git_diff_tree_to_tree(&diffs, repo, parentTree, thisTree, &opts)) != GIT_OK - ) { - git_commit_free(nextCommit); - git_commit_free(parent); + git_commit_free(currentCommit); + git_tree_free(currentTree); + continue; + } + + if (parentCount == 1) { + git_commit *parentCommit; + if ((baton->error_code = git_commit_parent(&parentCommit, currentCommit, 0)) != GIT_OK) { + git_commit_free(currentCommit); + git_tree_free(currentTree); break; } - } else { - if ((baton->error_code = git_diff_tree_to_tree(&diffs, repo, NULL, thisTree, &opts)) != GIT_OK) { - git_commit_free(nextCommit); + + git_tree *parentTree; + if ((baton->error_code = git_commit_tree(&parentTree, parentCommit)) != GIT_OK) { + git_commit_free(currentCommit); + git_commit_free(parentCommit); + git_tree_free(currentTree); break; } - } - free(file_path); - opts.pathspec.strings = NULL; - opts.pathspec.count = 0; - bool flag = false; - bool doRenamedPass = false; - unsigned int numDeltas = 
git_diff_num_deltas(diffs); - for (unsigned int j = 0; j < numDeltas; ++j) { - git_patch *nextPatch; - baton->error_code = git_patch_from_diff(&nextPatch, diffs, j); - - if (baton->error_code < GIT_OK) { + FileHistoryEvent *fileHistoryEvent; + if ((baton->error_code = FileHistoryEvent::buildHistoryEvent( + &fileHistoryEvent, + repo, + currentCommit, + currentTree, + parentTree, + baton->file_path + )) != GIT_OK) { + git_commit_free(currentCommit); + git_commit_free(parentCommit); + git_tree_free(currentTree); + git_tree_free(parentTree); break; } - if (nextPatch == NULL) { - continue; + if (fileHistoryEvent->type != GIT_DELTA_UNMODIFIED) { + baton->out->push_back(fileHistoryEvent); } - const git_diff_delta *delta = git_patch_get_delta(nextPatch); - bool isEqualOldFile = !strncmp(delta->old_file.path, baton->file_path, strlen(baton->file_path)); - bool isEqualNewFile = !strncmp(delta->new_file.path, baton->file_path, strlen(baton->file_path)); + git_commit_free(currentCommit); + git_commit_free(parentCommit); + git_tree_free(currentTree); + git_tree_free(parentTree); + continue; + } - if (isEqualNewFile) { - if (delta->status == GIT_DELTA_ADDED || delta->status == GIT_DELTA_DELETED) { - doRenamedPass = true; - break; - } - std::pair > *historyEntry; - if (!isEqualOldFile) { - historyEntry = new std::pair >( - nextCommit, - std::pair(strdup(delta->old_file.path), delta->status) - ); - } else { - historyEntry = new std::pair >( - nextCommit, - std::pair(strdup(delta->new_file.path), delta->status) - ); - } - baton->out->push_back(historyEntry); - flag = true; + std::pair firstMatchingParentIndex(false, 0); + bool fileExistsInCurrent = false, fileExistsInSomeParent = false; + for (unsigned int parentIndex = 0; parentIndex < parentCount; ++parentIndex) { + git_commit *parentCommit; + if ((baton->error_code = git_commit_parent(&parentCommit, currentCommit, parentIndex)) != GIT_OK) { + break; } - git_patch_free(nextPatch); - - if (flag) { + git_tree *parentTree; + if 
((baton->error_code = git_commit_tree(&parentTree, parentCommit)) != GIT_OK) { + git_commit_free(parentCommit); break; } - } - if (doRenamedPass) { - git_diff_free(diffs); + FileHistoryEvent *fileHistoryEvent; + if ((baton->error_code = FileHistoryEvent::buildHistoryEvent( + &fileHistoryEvent, + repo, + currentCommit, + currentTree, + parentTree, + baton->file_path + )) != GIT_OK) { + git_tree_free(parentTree); + git_commit_free(parentCommit); + break; + } - if (parents == 1) { - if ((baton->error_code = git_diff_tree_to_tree(&diffs, repo, parentTree, thisTree, NULL)) != GIT_OK) { - git_commit_free(nextCommit); + switch (fileHistoryEvent->type) { + case GIT_DELTA_ADDED: { + fileExistsInCurrent = true; break; } - if ((baton->error_code = git_diff_find_similar(diffs, NULL)) != GIT_OK) { - git_commit_free(nextCommit); + case GIT_DELTA_MODIFIED: { + fileExistsInCurrent = true; + fileExistsInSomeParent = true; break; } - } else { - if ((baton->error_code = git_diff_tree_to_tree(&diffs, repo, NULL, thisTree, NULL)) != GIT_OK) { - git_commit_free(nextCommit); + case GIT_DELTA_DELETED: { + fileExistsInSomeParent = true; break; } - if((baton->error_code = git_diff_find_similar(diffs, NULL)) != GIT_OK) { - git_commit_free(nextCommit); - break; - } - } - - flag = false; - numDeltas = git_diff_num_deltas(diffs); - for (unsigned int j = 0; j < numDeltas; ++j) { - git_patch *nextPatch; - baton->error_code = git_patch_from_diff(&nextPatch, diffs, j); - - if (baton->error_code < GIT_OK) { + case GIT_DELTA_RENAMED: { + if (fileHistoryEvent->existsInCurrentTree) { + fileExistsInCurrent = true; + } else { + fileExistsInSomeParent = true; + } break; } - - if (nextPatch == NULL) { - continue; - } - - const git_diff_delta *delta = git_patch_get_delta(nextPatch); - bool isEqualOldFile = !strncmp(delta->old_file.path, baton->file_path, strlen(baton->file_path)); - bool isEqualNewFile = !strncmp(delta->new_file.path, baton->file_path, strlen(baton->file_path)); - int oldLen = 
strlen(delta->old_file.path); - int newLen = strlen(delta->new_file.path); - char *outPair = new char[oldLen + newLen + 2]; - strcpy(outPair, delta->new_file.path); - outPair[newLen] = '\n'; - outPair[newLen + 1] = '\0'; - strcat(outPair, delta->old_file.path); - - if (isEqualNewFile) { - std::pair > *historyEntry; - if (!isEqualOldFile || delta->status == GIT_DELTA_RENAMED) { - historyEntry = new std::pair >( - nextCommit, - std::pair(strdup(outPair), delta->status) - ); - } else { - historyEntry = new std::pair >( - nextCommit, - std::pair(strdup(delta->new_file.path), delta->status) - ); + case GIT_DELTA_UNMODIFIED: { + if (fileHistoryEvent->existsInCurrentTree) { + fileExistsInCurrent = true; + fileExistsInSomeParent = true; } - baton->out->push_back(historyEntry); - flag = true; - } else if (isEqualOldFile) { - std::pair > *historyEntry; - historyEntry = new std::pair >( - nextCommit, - std::pair(strdup(outPair), delta->status) - ); - baton->out->push_back(historyEntry); - flag = true; + firstMatchingParentIndex = std::make_pair(true, parentIndex); + break; } - - delete[] outPair; - - git_patch_free(nextPatch); - - if (flag) { + default: { break; } } - } - git_diff_free(diffs); + delete fileHistoryEvent; + git_commit_free(parentCommit); + git_tree_free(parentTree); - if (!flag && nextCommit != NULL) { - git_commit_free(nextCommit); + if (firstMatchingParentIndex.first) { + break; + } } if (baton->error_code != GIT_OK) { + git_tree_free(currentTree); + git_commit_free(currentCommit); break; } - } - free(nextOid); + if (!firstMatchingParentIndex.first) { + assert(fileExistsInCurrent || fileExistsInSomeParent); + git_delta_t mergeType = GIT_DELTA_UNREADABLE; // It will never result in this case because of the assertion above. 
+ if (fileExistsInCurrent && fileExistsInSomeParent) { + mergeType = GIT_DELTA_MODIFIED; + } else if (fileExistsInCurrent) { + mergeType = GIT_DELTA_ADDED; + } else if (fileExistsInSomeParent) { + mergeType = GIT_DELTA_DELETED; + } - if (baton->error_code != GIT_OK) { - if (baton->error_code != GIT_ITEROVER) { - baton->error = git_error_dup(giterr_last()); + FileHistoryEvent *fileHistoryEvent = new FileHistoryEvent( + mergeType, + mergeType != GIT_DELTA_DELETED, + true, + currentCommit, + NULL, + NULL + ); + baton->out->push_back(fileHistoryEvent); + git_tree_free(currentTree); + git_commit_free(currentCommit); + continue; + } - while(!baton->out->empty()) - { - std::pair > *pairToFree = baton->out->back(); - baton->out->pop_back(); - git_commit_free(pairToFree->first); - free(pairToFree->second.first); - free(pairToFree); + assert(firstMatchingParentIndex.first); + for (unsigned int parentIndex = 0; parentIndex < parentCount; ++parentIndex) { + if (parentIndex == firstMatchingParentIndex.second) { + continue; } - delete baton->out; + const git_oid *parentOid = git_commit_parent_id(currentCommit, parentIndex); + assert(parentOid != NULL); + git_revwalk_hide(baton->walk, parentOid); + } + git_commit_free(currentCommit); + git_tree_free(currentTree); + } - baton->out = NULL; + if (baton->error_code != GIT_OK && baton->error_code != GIT_ITEROVER) { + // Something went wrong in our loop, discard everything in the async worker + for (unsigned int i = 0; i < baton->out->size(); ++i) { + delete static_cast(baton->out->at(i)); } - } else { - baton->error_code = GIT_OK; + delete baton->out; + baton->out = NULL; + baton->error = git_error_dup(git_error_last()); } + free((void *)baton->file_path); + baton->file_path = NULL; +} + +void GitRevwalk::FileHistoryWalkWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + for (unsigned int i = 0; i < baton->out->size(); 
++i) { + delete static_cast(baton->out->at(i)); + } + + delete baton->out; + + free((void *)baton->file_path); + + delete baton; } void GitRevwalk::FileHistoryWalkWorker::HandleOKCallback() { if (baton->out != NULL) { - unsigned int size = baton->out->size(); - Local result = Nan::New(size); + const unsigned int size = baton->out->size(); + v8::Local result = Nan::New(size); for (unsigned int i = 0; i < size; i++) { - Local historyEntry = Nan::New(); - std::pair > *batonResult = baton->out->at(i); - Nan::Set(historyEntry, Nan::New("commit").ToLocalChecked(), GitCommit::New(batonResult->first, true)); - Nan::Set(historyEntry, Nan::New("status").ToLocalChecked(), Nan::New(batonResult->second.second)); - if (batonResult->second.second == GIT_DELTA_RENAMED) { - char *namePair = batonResult->second.first; - char *split = strchr(namePair, '\n'); - *split = '\0'; - char *oldName = split + 1; - - Nan::Set(historyEntry, Nan::New("oldName").ToLocalChecked(), Nan::New(oldName).ToLocalChecked()); - Nan::Set(historyEntry, Nan::New("newName").ToLocalChecked(), Nan::New(namePair).ToLocalChecked()); - } - Nan::Set(result, Nan::New(i), historyEntry); - - free(batonResult->second.first); - free(batonResult); + FileHistoryEvent *batonResult = static_cast(baton->out->at(i)); + Nan::Set(result, Nan::New(i), batonResult->toJavascript()); + delete batonResult; } - Local argv[2] = { + Nan::Set(result, Nan::New("reachedEndOfHistory").ToLocalChecked(), Nan::New(baton->error_code == GIT_ITEROVER)); + + v8::Local argv[2] = { Nan::Null(), result }; @@ -289,15 +482,15 @@ void GitRevwalk::FileHistoryWalkWorker::HandleOKCallback() } if (baton->error) { - Local err; + v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method fileHistoryWalk has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method fileHistoryWalk has thrown an 
error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fileHistoryWalk").ToLocalChecked()); - Local argv[1] = { + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fileHistoryWalk").ToLocalChecked()); + v8::Local argv[1] = { err }; callback->Call(1, argv, async_resource); @@ -311,10 +504,10 @@ void GitRevwalk::FileHistoryWalkWorker::HandleOKCallback() } if (baton->error_code < 0) { - Local err = Nan::Error("Method next has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fileHistoryWalk").ToLocalChecked()); - Local argv[1] = { + v8::Local err = Nan::To(Nan::Error("Method next has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.fileHistoryWalk").ToLocalChecked()); + v8::Local argv[1] = { err }; callback->Call(1, argv, async_resource); @@ -322,4 +515,6 @@ void GitRevwalk::FileHistoryWalkWorker::HandleOKCallback() } callback->Call(0, NULL, async_resource); + + delete baton; } diff --git a/generate/templates/manual/src/async_baton.cc b/generate/templates/manual/src/async_baton.cc index 590a19c621..56694f33f1 100644 --- a/generate/templates/manual/src/async_baton.cc +++ b/generate/templates/manual/src/async_baton.cc @@ -1,5 +1,89 @@ #include "../include/async_baton.h" -void deleteBaton(AsyncBaton *baton) { - delete baton; +namespace nodegit { + void deleteBaton(AsyncBaton *baton) { + delete baton; + } + + AsyncBaton::AsyncBaton() + : asyncResource(ThreadPool::GetCurrentAsyncResource()), + callbackErrorHandle(*ThreadPool::GetCurrentCallbackErrorHandle()), + 
completedMutex(new std::mutex), + hasCompleted(false) + {} + + void AsyncBaton::SignalCompletion() { + std::lock_guard lock(*completedMutex); + hasCompleted = true; + completedCondition.notify_one(); + } + + void AsyncBaton::Done() { + onCompletion(); + } + + Nan::AsyncResource *AsyncBaton::GetAsyncResource() { + return asyncResource; + } + + void AsyncBaton::SetCallbackError(v8::Local error) { + callbackErrorHandle.Reset(error); + } + + void AsyncBaton::ExecuteAsyncPerform(AsyncCallback asyncCallback, AsyncCallback asyncCancelCb, CompletionCallback onCompletion) { + auto jsCallback = [asyncCallback, this]() { + asyncCallback(this); + }; + auto cancelCallback = [asyncCancelCb, this]() { + asyncCancelCb(this); + }; + + if (onCompletion) { + this->onCompletion = [this, onCompletion]() { + onCompletion(this); + }; + + ThreadPool::PostCallbackEvent( + [jsCallback, cancelCallback]( + ThreadPool::QueueCallbackFn queueCallback, + ThreadPool::Callback callbackCompleted, + bool isThreaded // Temporary workaround for LFS checkout. Code added to be reverted. + ) -> ThreadPool::Callback { + queueCallback(jsCallback, cancelCallback); + callbackCompleted(); + + return []() {}; + } + ); + } else { + ThreadPool::PostCallbackEvent( + [this, jsCallback, cancelCallback]( + ThreadPool::QueueCallbackFn queueCallback, + ThreadPool::Callback callbackCompleted, + bool isThreaded // Temporary workaround for LFS checkout. Code added to be reverted. + ) -> ThreadPool::Callback { + // Temporary workaround for LFS checkout. Code modified to be reverted. 
+ if (!isThreaded) { + this->onCompletion = callbackCompleted; + + queueCallback(jsCallback, cancelCallback); + + return std::bind(&AsyncBaton::SignalCompletion, this); + } + else { + this->onCompletion = std::bind(&AsyncBaton::SignalCompletion, this); + queueCallback(jsCallback, cancelCallback); + return []() {}; + } + } + ); + + WaitForCompletion(); + } + } + + void AsyncBaton::WaitForCompletion() { + std::unique_lock lock(*completedMutex); + while (!hasCompleted) completedCondition.wait(lock); + } } diff --git a/generate/templates/manual/src/async_worker.cc b/generate/templates/manual/src/async_worker.cc new file mode 100644 index 0000000000..709e8a1e83 --- /dev/null +++ b/generate/templates/manual/src/async_worker.cc @@ -0,0 +1,43 @@ +#include "../include/async_worker.h" + +namespace nodegit { + AsyncWorker::AsyncWorker(Nan::Callback *callback, const char *resourceName, std::map> &_cleanupHandles) + : Nan::AsyncWorker(callback, resourceName), cleanupHandles(_cleanupHandles) + {} + + AsyncWorker::AsyncWorker(Nan::Callback *callback, const char *resourceName) + : Nan::AsyncWorker(callback, resourceName) + {} + + void AsyncWorker::Cancel() { + isCancelled = true; + + // We use Nan::AsyncWorker's ErrorMessage flow + // to trigger `HandleErrorCallback` for cancellation + // of AsyncWork + SetErrorMessage("SHUTTING DOWN"); + } + + Nan::AsyncResource *AsyncWorker::GetAsyncResource() { + return async_resource; + } + + Nan::Global *AsyncWorker::GetCallbackErrorHandle() { + return &callbackErrorHandle; + } + + bool AsyncWorker::GetIsCancelled() const { + return isCancelled; + } + + void AsyncWorker::Destroy() { + std::for_each(cleanupCalls.begin(), cleanupCalls.end(), [](std::function cleanupCall) { + cleanupCall(); + }); + Nan::AsyncWorker::Destroy(); + } + + void AsyncWorker::RegisterCleanupCall(std::function cleanupCall) { + cleanupCalls.push_back(cleanupCall); + } +} diff --git a/generate/templates/manual/src/cleanup_handle.cc 
b/generate/templates/manual/src/cleanup_handle.cc new file mode 100644 index 0000000000..b243d4f0e0 --- /dev/null +++ b/generate/templates/manual/src/cleanup_handle.cc @@ -0,0 +1,6 @@ +#include "../include/cleanup_handle.h" + +namespace nodegit { + CleanupHandle::CleanupHandle() {} + CleanupHandle::~CleanupHandle() {} +} diff --git a/generate/templates/manual/src/context.cc b/generate/templates/manual/src/context.cc new file mode 100644 index 0000000000..a4c9483dc7 --- /dev/null +++ b/generate/templates/manual/src/context.cc @@ -0,0 +1,84 @@ +#include "../include/context.h" + +namespace nodegit { + std::map Context::contexts; + + AsyncContextCleanupHandle::AsyncContextCleanupHandle(v8::Isolate *isolate, Context *context) + : context(context), + handle(node::AddEnvironmentCleanupHook(isolate, AsyncCleanupContext, this)) + {} + + AsyncContextCleanupHandle::~AsyncContextCleanupHandle() { + delete context; + doneCallback(doneData); + } + + void AsyncContextCleanupHandle::AsyncCleanupContext(void *data, void(*uvCallback)(void*), void *uvCallbackData) { + std::unique_ptr cleanupHandle(static_cast(data)); + cleanupHandle->doneCallback = uvCallback; + cleanupHandle->doneData = uvCallbackData; + // the ordering of std::move and the call to Context::ShutdownThreadPool prohibits + // us from referring to context on cleanupHandle if we're also intending to move + // the unique_ptr into the method. 
+ Context *context = cleanupHandle->context; + context->ShutdownThreadPool(std::move(cleanupHandle)); + } + + Context::Context(v8::Isolate *isolate) + : isolate(isolate) + , threadPool(10, node::GetCurrentEventLoop(isolate), this) + { + Nan::HandleScope scopoe; + v8::Local storage = Nan::New(); + persistentStorage.Reset(storage); + contexts[isolate] = this; + new AsyncContextCleanupHandle(isolate, this); + } + + Context::~Context() { + nodegit::TrackerWrap::DeleteFromList(&trackerList); + contexts.erase(isolate); + } + + std::shared_ptr Context::GetCleanupHandle(std::string key) { + return cleanupHandles[key]; + } + + Context *Context::GetCurrentContext() { + Nan::HandleScope scope; + v8::Local context = Nan::GetCurrentContext(); + v8::Isolate *isolate = context->GetIsolate(); + return contexts[isolate]; + } + + v8::Local Context::GetFromPersistent(std::string key) { + Nan::EscapableHandleScope scope; + v8::Local storage = Nan::New(persistentStorage); + Nan::MaybeLocal value = Nan::Get(storage, Nan::New(key).ToLocalChecked()); + return scope.Escape(value.ToLocalChecked()); + } + + void Context::QueueWorker(nodegit::AsyncWorker *worker) { + threadPool.QueueWorker(worker); + } + + std::shared_ptr Context::RemoveCleanupHandle(std::string key) { + std::shared_ptr cleanupItem = cleanupHandles[key]; + cleanupHandles.erase(key); + return cleanupItem; + } + + void Context::SaveToPersistent(std::string key, const v8::Local &value) { + Nan::HandleScope scope; + v8::Local storage = Nan::New(persistentStorage); + Nan::Set(storage, Nan::New(key).ToLocalChecked(), value); + } + + void Context::SaveCleanupHandle(std::string key, std::shared_ptr cleanupItem) { + cleanupHandles[key] = cleanupItem; + } + + void Context::ShutdownThreadPool(std::unique_ptr cleanupHandle) { + threadPool.Shutdown(std::move(cleanupHandle)); + } +} diff --git a/generate/templates/manual/src/convenient_hunk.cc b/generate/templates/manual/src/convenient_hunk.cc index 184f015a1a..2d33e50b66 100644 --- 
a/generate/templates/manual/src/convenient_hunk.cc +++ b/generate/templates/manual/src/convenient_hunk.cc @@ -5,6 +5,7 @@ extern "C" { #include } +#include "../include/context.h" #include "../include/functions/copy.h" #include "../include/convenient_hunk.h" #include "../include/diff_line.h" @@ -32,27 +33,28 @@ ConvenientHunk::~ConvenientHunk() { HunkDataFree(this->hunk); } -void ConvenientHunk::InitializeComponent(Local target) { +void ConvenientHunk::InitializeComponent(Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; - Local tpl = Nan::New(JSNewFunction); + Local nodegitExternal = Nan::New(nodegitContext); + Local tpl = Nan::New(JSNewFunction, nodegitExternal); - tpl->InstanceTemplate()->SetInternalFieldCount(1); + tpl->InstanceTemplate()->SetInternalFieldCount(2); tpl->SetClassName(Nan::New("ConvenientHunk").ToLocalChecked()); - Nan::SetPrototypeMethod(tpl, "size", Size); - Nan::SetPrototypeMethod(tpl, "lines", Lines); + Nan::SetPrototypeMethod(tpl, "size", Size, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "lines", Lines, nodegitExternal); - Nan::SetPrototypeMethod(tpl, "oldStart", OldStart); - Nan::SetPrototypeMethod(tpl, "oldLines", OldLines); - Nan::SetPrototypeMethod(tpl, "newStart", NewStart); - Nan::SetPrototypeMethod(tpl, "newLines", NewLines); - Nan::SetPrototypeMethod(tpl, "headerLen", HeaderLen); - Nan::SetPrototypeMethod(tpl, "header", Header); + Nan::SetPrototypeMethod(tpl, "oldStart", OldStart, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "oldLines", OldLines, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "newStart", NewStart, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "newLines", NewLines, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "headerLen", HeaderLen, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "header", Header, nodegitExternal); - Local _constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); - constructor_template.Reset(_constructor_template); - Nan::Set(target, 
Nan::New("ConvenientHunk").ToLocalChecked(), _constructor_template); + Local constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); + nodegitContext->SaveToPersistent("ConvenientHunk::Template", constructor_template); + Nan::Set(target, Nan::New("ConvenientHunk").ToLocalChecked(), constructor_template); } NAN_METHOD(ConvenientHunk::JSNewFunction) { @@ -70,7 +72,9 @@ NAN_METHOD(ConvenientHunk::JSNewFunction) { Local ConvenientHunk::New(void *raw) { Nan::EscapableHandleScope scope; Local argv[1] = { Nan::New((void *)raw) }; - return scope.Escape(Nan::NewInstance(Nan::New(ConvenientHunk::constructor_template), 1, argv).ToLocalChecked()); + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); + Local constructor_template = nodegitContext->GetFromPersistent("ConvenientHunk::Template").As(); + return scope.Escape(Nan::NewInstance(constructor_template, 1, argv).ToLocalChecked()); } HunkData *ConvenientHunk::GetValue() { @@ -92,22 +96,27 @@ NAN_METHOD(ConvenientHunk::Lines) { return Nan::ThrowError("Callback is required and must be a Function."); } - LinesBaton *baton = new LinesBaton; + LinesBaton *baton = new LinesBaton(); baton->hunk = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + baton->lines = new std::vector; + baton->lines->reserve(baton->hunk->numLines); Nan::Callback *callback = new Nan::Callback(Local::Cast(info[0])); LinesWorker *worker = new LinesWorker(baton, callback); - worker->SaveToPersistent("hunk", info.This()); + worker->Reference("hunk", info.This()); - Nan::AsyncQueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster ConvenientHunk::LinesWorker::AcquireLocks() { + return nodegit::LockMaster(true); +} + void ConvenientHunk::LinesWorker::Execute() { - baton->lines = new std::vector; - baton->lines->reserve(baton->hunk->numLines); for (unsigned int i = 0; i < baton->hunk->numLines; ++i) { git_diff_line 
*storeLine = (git_diff_line *)malloc(sizeof(git_diff_line)); storeLine->origin = baton->hunk->lines->at(i)->origin; @@ -121,6 +130,15 @@ void ConvenientHunk::LinesWorker::Execute() { } } +void ConvenientHunk::LinesWorker::HandleErrorCallback() { + while (!baton->lines->empty()) { + free(baton->lines->back()); + baton->lines->pop_back(); + } + + delete baton->lines; +} + void ConvenientHunk::LinesWorker::HandleOKCallback() { unsigned int size = baton->lines->size(); Local result = Nan::New(size); @@ -136,6 +154,8 @@ void ConvenientHunk::LinesWorker::HandleOKCallback() { result }; callback->Call(2, argv, async_resource); + + delete baton; } NAN_METHOD(ConvenientHunk::OldStart) { @@ -182,4 +202,10 @@ NAN_METHOD(ConvenientHunk::Header) { info.GetReturnValue().Set(to); } -Nan::Persistent ConvenientHunk::constructor_template; +void ConvenientHunk::Reference() { + Ref(); +} + +void ConvenientHunk::Unreference() { + Unref(); +} diff --git a/generate/templates/manual/src/convenient_patch.cc b/generate/templates/manual/src/convenient_patch.cc index 9c6cd46ca1..22e6e6b49b 100644 --- a/generate/templates/manual/src/convenient_patch.cc +++ b/generate/templates/manual/src/convenient_patch.cc @@ -5,6 +5,7 @@ extern "C" { #include } +#include "../include/context.h" #include "../include/convenient_hunk.h" #include "../include/convenient_patch.h" #include "../include/functions/copy.h" @@ -32,7 +33,7 @@ void PatchDataFree(PatchData *patch) { } PatchData *createFromRaw(git_patch *raw) { - PatchData *patch = new PatchData; + PatchData *patch = new PatchData(); const git_diff_delta *delta = git_patch_get_delta(raw); patch->status = delta->status; @@ -55,7 +56,7 @@ PatchData *createFromRaw(git_patch *raw) { patch->hunks->reserve(patch->numHunks); for (unsigned int i = 0; i < patch->numHunks; ++i) { - HunkData *hunkData = new HunkData; + HunkData *hunkData = new HunkData(); const git_diff_hunk *hunk = NULL; int result = git_patch_get_hunk(&hunk, &hunkData->numLines, raw, i); if (result != 
0) { @@ -129,36 +130,37 @@ ConvenientPatch::~ConvenientPatch() { PatchDataFree(this->patch); } -void ConvenientPatch::InitializeComponent(Local target) { +void ConvenientPatch::InitializeComponent(Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; - Local tpl = Nan::New(JSNewFunction); + Local nodegitExternal = Nan::New(nodegitContext); + Local tpl = Nan::New(JSNewFunction, nodegitExternal); - tpl->InstanceTemplate()->SetInternalFieldCount(1); + tpl->InstanceTemplate()->SetInternalFieldCount(2); tpl->SetClassName(Nan::New("ConvenientPatch").ToLocalChecked()); - Nan::SetPrototypeMethod(tpl, "hunks", Hunks); - Nan::SetPrototypeMethod(tpl, "lineStats", LineStats); - Nan::SetPrototypeMethod(tpl, "size", Size); - - Nan::SetPrototypeMethod(tpl, "oldFile", OldFile); - Nan::SetPrototypeMethod(tpl, "newFile", NewFile); - Nan::SetPrototypeMethod(tpl, "status", Status); - Nan::SetPrototypeMethod(tpl, "isUnmodified", IsUnmodified); - Nan::SetPrototypeMethod(tpl, "isAdded", IsAdded); - Nan::SetPrototypeMethod(tpl, "isDeleted", IsDeleted); - Nan::SetPrototypeMethod(tpl, "isModified", IsModified); - Nan::SetPrototypeMethod(tpl, "isRenamed", IsRenamed); - Nan::SetPrototypeMethod(tpl, "isCopied", IsCopied); - Nan::SetPrototypeMethod(tpl, "isIgnored", IsIgnored); - Nan::SetPrototypeMethod(tpl, "isUntracked", IsUntracked); - Nan::SetPrototypeMethod(tpl, "isTypeChange", IsTypeChange); - Nan::SetPrototypeMethod(tpl, "isUnreadable", IsUnreadable); - Nan::SetPrototypeMethod(tpl, "isConflicted", IsConflicted); - - Local _constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); - constructor_template.Reset(_constructor_template); - Nan::Set(target, Nan::New("ConvenientPatch").ToLocalChecked(), _constructor_template); + Nan::SetPrototypeMethod(tpl, "hunks", Hunks, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "lineStats", LineStats, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "size", Size, nodegitExternal); + + Nan::SetPrototypeMethod(tpl, "oldFile", 
OldFile, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "newFile", NewFile, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "status", Status, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isUnmodified", IsUnmodified, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isAdded", IsAdded, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isDeleted", IsDeleted, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isModified", IsModified, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isRenamed", IsRenamed, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isCopied", IsCopied, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isIgnored", IsIgnored, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isUntracked", IsUntracked, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isTypeChange", IsTypeChange, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isUnreadable", IsUnreadable, nodegitExternal); + Nan::SetPrototypeMethod(tpl, "isConflicted", IsConflicted, nodegitExternal); + + Local constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); + nodegitContext->SaveToPersistent("ConvenientPatch::Template", constructor_template); + Nan::Set(target, Nan::New("ConvenientPatch").ToLocalChecked(), constructor_template); } NAN_METHOD(ConvenientPatch::JSNewFunction) { @@ -176,7 +178,9 @@ NAN_METHOD(ConvenientPatch::JSNewFunction) { Local ConvenientPatch::New(void *raw) { Nan::EscapableHandleScope scope; Local argv[1] = { Nan::New((void *)raw) }; - return scope.Escape(Nan::NewInstance(Nan::New(ConvenientPatch::constructor_template), 1, argv).ToLocalChecked()); + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); + Local constructor_template = nodegitContext->GetFromPersistent("ConvenientPatch::Template").As(); + return scope.Escape(Nan::NewInstance(constructor_template, 1, argv).ToLocalChecked()); } ConvenientLineStats ConvenientPatch::GetLineStats() { @@ -208,26 +212,30 @@ NAN_METHOD(ConvenientPatch::Hunks) { return Nan::ThrowError("Callback is 
required and must be a Function."); } - HunksBaton *baton = new HunksBaton; + HunksBaton *baton = new HunksBaton(); baton->patch = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + baton->hunks = new std::vector; + baton->hunks->reserve(baton->patch->numHunks); Nan::Callback *callback = new Nan::Callback(Local::Cast(info[0])); HunksWorker *worker = new HunksWorker(baton, callback); - worker->SaveToPersistent("patch", info.This()); + worker->Reference("patch", info.This()); - Nan::AsyncQueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster ConvenientPatch::HunksWorker::AcquireLocks() { + return nodegit::LockMaster(true); +} + void ConvenientPatch::HunksWorker::Execute() { // copy hunks - baton->hunks = new std::vector; - baton->hunks->reserve(baton->patch->numHunks); - for (unsigned int i = 0; i < baton->patch->numHunks; ++i) { - HunkData *hunkData = new HunkData; + HunkData *hunkData = new HunkData(); hunkData->numLines = baton->patch->hunks->at(i)->numLines; hunkData->hunk.old_start = baton->patch->hunks->at(i)->hunk.old_start; hunkData->hunk.old_lines = baton->patch->hunks->at(i)->hunk.old_lines; @@ -254,6 +262,20 @@ void ConvenientPatch::HunksWorker::Execute() { } } +void ConvenientPatch::HunksWorker::HandleErrorCallback() { + while (!baton->hunks->empty()) { + HunkData *hunk = baton->hunks->back(); + baton->hunks->pop_back(); + + while (!hunk->lines->empty()) { + free(hunk->lines->back()); + hunk->lines->pop_back(); + } + } + + delete baton->hunks; +} + void ConvenientPatch::HunksWorker::HandleOKCallback() { unsigned int size = baton->hunks->size(); Local result = Nan::New(size); @@ -269,6 +291,8 @@ void ConvenientPatch::HunksWorker::HandleOKCallback() { result }; callback->Call(2, argv, async_resource); + + delete baton; } NAN_METHOD(ConvenientPatch::LineStats) { @@ -397,4 +421,10 @@ NAN_METHOD(ConvenientPatch::IsConflicted) { 
info.GetReturnValue().Set(to); } -Nan::Persistent ConvenientPatch::constructor_template; +void ConvenientPatch::Reference() { + Ref(); +} + +void ConvenientPatch::Unreference() { + Unref(); +} diff --git a/generate/templates/manual/src/filter_registry.cc b/generate/templates/manual/src/filter_registry.cc index d2d2db933d..21a7cfbb86 100644 --- a/generate/templates/manual/src/filter_registry.cc +++ b/generate/templates/manual/src/filter_registry.cc @@ -6,11 +6,12 @@ extern "C" { } #include "../include/nodegit.h" +#include "../include/cleanup_handle.h" +#include "../include/context.h" #include "../include/lock_master.h" #include "../include/functions/copy.h" #include "../include/filter_registry.h" #include "nodegit_wrapper.cc" -#include "../include/async_libgit2_queue_worker.h" #include "../include/filter.h" @@ -18,19 +19,19 @@ using namespace std; using namespace v8; using namespace node; -Nan::Persistent GitFilterRegistry::persistentHandle; - -// #pragma unmanaged -void GitFilterRegistry::InitializeComponent(v8::Local target) { +void GitFilterRegistry::InitializeComponent(v8::Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; - v8::Local object = Nan::New(); + v8::Local filterRegistry = Nan::New(); - Nan::SetMethod(object, "register", GitFilterRegister); - Nan::SetMethod(object, "unregister", GitFilterUnregister); + Local nodegitExternal = Nan::New(nodegitContext); + Nan::SetMethod(filterRegistry, "register", GitFilterRegister, nodegitExternal); + Nan::SetMethod(filterRegistry, "unregister", GitFilterUnregister, nodegitExternal); - Nan::Set(target, Nan::New("FilterRegistry").ToLocalChecked(), object); - GitFilterRegistry::persistentHandle.Reset(object); + Nan::Set(target, Nan::New("FilterRegistry").ToLocalChecked(), filterRegistry); + nodegitContext->SaveToPersistent("FilterRegistry", filterRegistry); + std::shared_ptr filterRegistryCleanupHandles(new nodegit::FilterRegistryCleanupHandles); + 
nodegitContext->SaveCleanupHandle("filterRegistry", filterRegistryCleanupHandles); } NAN_METHOD(GitFilterRegistry::GitFilterRegister) { @@ -52,10 +53,23 @@ NAN_METHOD(GitFilterRegistry::GitFilterRegister) { return Nan::ThrowError("Callback is required and must be a Function."); } - FilterRegisterBaton *baton = new FilterRegisterBaton; + FilterRegisterBaton *baton = new FilterRegisterBaton(); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + std::map> cleanupHandles; + + { + auto conversionResult = ConfigurableGitFilter::fromJavascript(nodegitContext, info[1]); + if (!conversionResult.result) { + delete baton; + return Nan::ThrowError(Nan::New(conversionResult.error).ToLocalChecked()); + } + + auto convertedObject = conversionResult.result; + cleanupHandles["filter"] = convertedObject; + baton->filter = convertedObject->GetValue(); + } - baton->filter = Nan::ObjectWrap::Unwrap(info[1]->ToObject())->GetValue(); - String::Utf8Value name(info[0]->ToString()); + Nan::Utf8String name(Nan::To(info[0]).ToLocalChecked()); baton->filter_name = (char *)malloc(name.length() + 1); memcpy((void *)baton->filter_name, *name, name.length()); @@ -64,34 +78,50 @@ NAN_METHOD(GitFilterRegistry::GitFilterRegister) { baton->error_code = GIT_OK; baton->filter_priority = Nan::To(info[2]).FromJust(); - Nan::New(GitFilterRegistry::persistentHandle)->Set(info[0]->ToString(), info[1]->ToObject()); - Nan::Callback *callback = new Nan::Callback(Local::Cast(info[3])); - RegisterWorker *worker = new RegisterWorker(baton, callback); + RegisterWorker *worker = new RegisterWorker(baton, callback, cleanupHandles); - worker->SaveToPersistent("filter_name", info[0]->ToObject()); - worker->SaveToPersistent("filter_priority", info[2]->ToObject()); + worker->Reference("filter_name", info[0]); + worker->Reference("filter_priority", info[2]); - AsyncLibgit2QueueWorker(worker); + nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster 
GitFilterRegistry::RegisterWorker::AcquireLocks() { + return nodegit::LockMaster(true, baton->filter_name, baton->filter); +} + void GitFilterRegistry::RegisterWorker::Execute() { - giterr_clear(); + git_error_clear(); { - LockMaster lockMaster(/*asyncAction: */true, baton->filter_name, baton->filter); int result = git_filter_register(baton->filter_name, baton->filter, baton->filter_priority); baton->error_code = result; - if (result != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); + if (result != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } + } +} + +void GitFilterRegistry::RegisterWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); } + + free(baton->filter_name); + + delete baton; } void GitFilterRegistry::RegisterWorker::HandleOKCallback() { if (baton->error_code == GIT_OK) { + static_pointer_cast(nodegit::Context::GetCurrentContext()->GetCleanupHandle("filterRegistry"))->registeredFilters[baton->filter_name] = cleanupHandles["filter"]; v8::Local result = Nan::New(baton->error_code); v8::Local argv[2] = { Nan::Null(), @@ -102,12 +132,12 @@ void GitFilterRegistry::RegisterWorker::HandleOKCallback() { else if (baton->error) { v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method register has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method register has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.register").ToLocalChecked()); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, 
Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.register").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -117,9 +147,9 @@ void GitFilterRegistry::RegisterWorker::HandleOKCallback() { free((void *)baton->error); } else if (baton->error_code < 0) { - v8::Local err = Nan::Error("Method register has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.register").ToLocalChecked()); + v8::Local err = Nan::To(Nan::Error("Method register has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.register").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -128,8 +158,10 @@ void GitFilterRegistry::RegisterWorker::HandleOKCallback() { else { callback->Call(0, NULL, async_resource); } + + free(baton->filter_name); + delete baton; - return; } NAN_METHOD(GitFilterRegistry::GitFilterUnregister) { @@ -143,8 +175,8 @@ NAN_METHOD(GitFilterRegistry::GitFilterUnregister) { return Nan::ThrowError("Callback is required and must be a Function."); } - FilterUnregisterBaton *baton = new FilterUnregisterBaton; - String::Utf8Value name(info[0]->ToString()); + FilterUnregisterBaton *baton = new FilterUnregisterBaton(); + Nan::Utf8String name(Nan::To(info[0]).ToLocalChecked()); baton->filter_name = (char *)malloc(name.length() + 1); memcpy((void *)baton->filter_name, *name, name.length()); @@ -156,28 +188,45 @@ NAN_METHOD(GitFilterRegistry::GitFilterUnregister) { Nan::Callback *callback = new Nan::Callback(Local::Cast(info[1])); UnregisterWorker *worker = new UnregisterWorker(baton, callback); - worker->SaveToPersistent("filter_name", info[0]); - - AsyncLibgit2QueueWorker(worker); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + 
nodegitContext->QueueWorker(worker); return; } +nodegit::LockMaster GitFilterRegistry::UnregisterWorker::AcquireLocks() { + return nodegit::LockMaster(true, baton->filter_name); +} + void GitFilterRegistry::UnregisterWorker::Execute() { - giterr_clear(); + git_error_clear(); { - LockMaster lockMaster(/*asyncAction: */true, baton->filter_name); int result = git_filter_unregister(baton->filter_name); baton->error_code = result; - if (result != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); + if (result != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } + } +} + +void GitFilterRegistry::UnregisterWorker::HandleErrorCallback() { + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); } + + free((void *)baton->error); } + + free(baton->filter_name); + + delete baton; } void GitFilterRegistry::UnregisterWorker::HandleOKCallback() { if (baton->error_code == GIT_OK) { + static_pointer_cast(nodegit::Context::GetCurrentContext()->GetCleanupHandle("filterRegistry"))->registeredFilters.erase(baton->filter_name); v8::Local result = Nan::New(baton->error_code); v8::Local argv[2] = { Nan::Null(), @@ -188,12 +237,12 @@ void GitFilterRegistry::UnregisterWorker::HandleOKCallback() { else if (baton->error) { v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method register has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method register has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.unregister").ToLocalChecked()); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, 
Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.unregister").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -203,9 +252,9 @@ void GitFilterRegistry::UnregisterWorker::HandleOKCallback() { free((void *)baton->error); } else if (baton->error_code < 0) { - v8::Local err = Nan::Error("Method unregister has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.unregister").ToLocalChecked()); + v8::Local err = Nan::To(Nan::Error("Method unregister has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("FilterRegistry.unregister").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -214,6 +263,8 @@ void GitFilterRegistry::UnregisterWorker::HandleOKCallback() { else { callback->Call(0, NULL, async_resource); } + + free(baton->filter_name); + delete baton; - return; } diff --git a/generate/templates/manual/src/git_buf_converter.cc b/generate/templates/manual/src/git_buf_converter.cc index 0c19695041..da6f425f89 100644 --- a/generate/templates/manual/src/git_buf_converter.cc +++ b/generate/templates/manual/src/git_buf_converter.cc @@ -10,7 +10,7 @@ using namespace node; git_buf *GitBufConverter::Convert(Local val) { if (val->IsString() || val->IsStringObject()) { - v8::String::Utf8Value param1(val->ToString()); + Nan::Utf8String param1(Nan::To(val).ToLocalChecked()); std::string v8String = std::string(*param1); const size_t size = sizeof(git_buf); @@ -22,7 +22,7 @@ git_buf *GitBufConverter::Convert(Local val) { memcpy(memory, v8String.c_str(), stringLength); - result->asize = stringLength; + result->reserved = stringLength; result->size = stringLength; result->ptr = reinterpret_cast(memory); return result; diff --git a/generate/templates/manual/src/lock_master.cc 
b/generate/templates/manual/src/lock_master.cc index 30679b534b..8ad33378f9 100644 --- a/generate/templates/manual/src/lock_master.cc +++ b/generate/templates/manual/src/lock_master.cc @@ -1,246 +1,239 @@ #include #include -#include #include #include #include #include +#include +#include +#include #include "../include/lock_master.h" +namespace nodegit { + // information about a lockable object + // - the mutex used to lock it and the number of outstanding locks + struct ObjectInfo { + std::shared_ptr mutex; + unsigned useCount; + + ObjectInfo(unsigned useCount) + : mutex(new std::mutex), useCount(useCount) + {} + }; + + // LockMaster implementation details + // implemented in a separate class to keep LockMaster opaque + class LockMasterImpl { + // STATIC variables / methods + + // A map from objects that are locked (or were locked), to information on their mutex + static std::map mutexes; + // A mutex used for the mutexes map + static std::mutex mapMutex; + + // A thread local storage slot for the current thread-specific LockMasterImpl instance + thread_local static LockMasterImpl* currentLockMaster; + + // Cleans up any mutexes that are not currently used + static NAN_GC_CALLBACK(CleanupMutexes); + + public: + static void InitializeContext(); + + // INSTANCE variables / methods + + private: + // The set of objects this LockMaster is responsible for locking + std::set objectsToLock; + + // Mutexes locked by this LockMaster on construction and unlocked on destruction + std::vector> GetMutexes(int useCountDelta); + void Register(); + void Unregister(); + + public: + static LockMasterImpl *CurrentLockMasterImpl() { + return (LockMasterImpl *)currentLockMaster; + } -// information about a lockable object -// - the mutex used to lock it and the number of outstanding locks -struct ObjectInfo { - uv_mutex_t *mutex; - unsigned useCount; - - ObjectInfo(uv_mutex_t *mutex, unsigned useCount) - : mutex(mutex), useCount(useCount) - {} -}; - -// LockMaster implementation details 
-// implemented in a separate class to keep LockMaster opaque -class LockMasterImpl { - // STATIC variables / methods - - // A map from objects that are locked (or were locked), to information on their mutex - static std::map mutexes; - // A mutex used for the mutexes map - static uv_mutex_t mapMutex; - - // A libuv key used to store the current thread-specific LockMasterImpl instance - static uv_key_t currentLockMasterKey; - - // Cleans up any mutexes that are not currently used - static NAN_GC_CALLBACK(CleanupMutexes); + LockMasterImpl() { + Register(); + } -public: - static void Initialize(); + ~LockMasterImpl() { + Unregister(); + Unlock(true); + } - // INSTANCE variables / methods + void ObjectToLock(const void *objectToLock) { + objectsToLock.insert(objectToLock); + } -private: - // The set of objects this LockMaster is responsible for locking - std::set objectsToLock; + void Lock(bool acquireMutexes); + void Unlock(bool releaseMutexes); + }; - // Mutexes locked by this LockMaster on construction and unlocked on destruction - std::vector GetMutexes(int useCountDelta); - void Register(); - void Unregister(); + std::map LockMasterImpl::mutexes; + std::mutex LockMasterImpl::mapMutex; + thread_local LockMasterImpl* LockMasterImpl::currentLockMaster = nullptr; -public: - static LockMasterImpl *CurrentLockMasterImpl() { - return (LockMasterImpl *)uv_key_get(¤tLockMasterKey); + LockMaster::LockMaster(LockMaster &&other) { + impl = other.impl; + other.impl = nullptr; } - static LockMaster::Diagnostics GetDiagnostics(); - LockMasterImpl() { - Register(); - } + LockMaster &LockMaster::operator=(LockMaster &&other) { + if (&other == this) { + return *this; + } - ~LockMasterImpl() { - Unregister(); - Unlock(true); - } + impl = other.impl; + other.impl = nullptr; - void ObjectToLock(const void *objectToLock) { - objectsToLock.insert(objectToLock); + return *this; } - void Lock(bool acquireMutexes); - void Unlock(bool releaseMutexes); -}; - -std::map 
LockMasterImpl::mutexes; -uv_mutex_t LockMasterImpl::mapMutex; -uv_key_t LockMasterImpl::currentLockMasterKey; - -void LockMasterImpl::Initialize() { - uv_mutex_init(&mapMutex); - uv_key_create(¤tLockMasterKey); - Nan::AddGCEpilogueCallback(CleanupMutexes); -} + void LockMasterImpl::InitializeContext() { + Nan::AddGCEpilogueCallback(CleanupMutexes); + } -NAN_GC_CALLBACK(LockMasterImpl::CleanupMutexes) { - // skip cleanup if thread safety is disabled - // this means that turning thread safety on and then off - // could result in remaining mutexes - but they would get cleaned up - // if thread safety is turned on again - if (LockMaster::GetStatus() == LockMaster::Disabled) { - return; - } - - uv_mutex_lock(&mapMutex); - - for (auto it = mutexes.begin(); it != mutexes.end(); ) - { - uv_mutex_t *mutex = it->second.mutex; - unsigned useCount = it->second.useCount; - // if the mutex is not used by any LockMasters, - // we can destroy it - if (!useCount) { - uv_mutex_destroy(mutex); - free(mutex); - auto to_erase = it; - it++; - mutexes.erase(to_erase); - } else { - it++; + NAN_GC_CALLBACK(LockMasterImpl::CleanupMutexes) { + std::lock_guard lock(mapMutex); + + for (auto it = mutexes.begin(); it != mutexes.end(); ) + { + // if the mutex is not used by any LockMasters, + // we can destroy it + unsigned useCount = it->second.useCount; + if (!useCount) { + auto to_erase = it; + it++; + mutexes.erase(to_erase); + } else { + it++; + } } } - uv_mutex_unlock(&mapMutex); -} + void LockMaster::InitializeContext() { + LockMasterImpl::InitializeContext(); + } -void LockMaster::Initialize() { - LockMasterImpl::Initialize(); -} + std::vector> LockMasterImpl::GetMutexes(int useCountDelta) { + std::vector> objectMutexes; + std::lock_guard lock(mapMutex); + + for (auto object : objectsToLock) { + if (object) { + // ensure we have an initialized mutex for each object + auto mutexIt = mutexes.find(object); + if (mutexIt == mutexes.end()) { + mutexIt = mutexes.insert( + std::make_pair( + 
object, + ObjectInfo(0U) + ) + ).first; + } -std::vector LockMasterImpl::GetMutexes(int useCountDelta) { - std::vector objectMutexes; - - uv_mutex_lock(&mapMutex); - - for (auto object : objectsToLock) { - if(object) { - // ensure we have an initialized mutex for each object - auto mutexIt = mutexes.find(object); - if(mutexIt == mutexes.end()) { - mutexIt = mutexes.insert( - std::make_pair( - object, - ObjectInfo((uv_mutex_t *)malloc(sizeof(uv_mutex_t)), 0U) - ) - ).first; - uv_mutex_init(mutexIt->second.mutex); + objectMutexes.push_back(mutexIt->second.mutex); + mutexIt->second.useCount += useCountDelta; } - - objectMutexes.push_back(mutexIt->second.mutex); - mutexIt->second.useCount += useCountDelta; } - } - uv_mutex_unlock(&mapMutex); + return objectMutexes; + } - return objectMutexes; -} + void LockMasterImpl::Register() { + currentLockMaster = this; + } -void LockMasterImpl::Register() { - uv_key_set(¤tLockMasterKey, this); -} + void LockMasterImpl::Unregister() { + currentLockMaster = nullptr; + } -void LockMasterImpl::Unregister() { - uv_key_set(¤tLockMasterKey, NULL); -} + void LockMasterImpl::Lock(bool acquireMutexes) { + std::vector> objectMutexes = GetMutexes(acquireMutexes * 1); + + auto alreadyLocked = objectMutexes.end(); + std::vector>::iterator it; + + // we will attempt to lock all the mutexes at the same time to avoid deadlocks + // note in most cases we are locking 0 or 1 mutexes. more than 1 implies + // passing objects with different repos/owners in the same call. 
+ do { + // go through all the mutexes and try to lock them + for (it = objectMutexes.begin(); it != objectMutexes.end(); it++) { + // if we already locked this mutex in a previous pass via std::mutex::lock, + // we don't need to lock it again + if (it == alreadyLocked) { + continue; + } -void LockMasterImpl::Lock(bool acquireMutexes) { - std::vector objectMutexes = GetMutexes(acquireMutexes * 1); - - auto alreadyLocked = objectMutexes.end(); - - // we will attempt to lock all the mutexes at the same time to avoid deadlocks - // note in most cases we are locking 0 or 1 mutexes. more than 1 implies - // passing objects with different repos/owners in the same call. - std::vector::iterator it; - do { - // go through all the mutexes and try to lock them - for(it = objectMutexes.begin(); it != objectMutexes.end(); it++) { - // if we already locked this mutex in a previous pass via uv_mutex_lock, - // we don't need to lock it again - if (it == alreadyLocked) { - continue; - } - // first, try to lock (non-blocking) - bool failure = uv_mutex_trylock(*it); - if(failure) { - // we have failed to lock a mutex... unlock everything we have locked - std::for_each(objectMutexes.begin(), it, uv_mutex_unlock); - if (alreadyLocked > it && alreadyLocked != objectMutexes.end()) { - uv_mutex_unlock(*alreadyLocked); + // first, try to lock (non-blocking) + bool success = (*it)->try_lock(); + if (!success) { + // we have failed to lock a mutex... 
unlock everything we have locked + std::for_each(objectMutexes.begin(), it, [](std::shared_ptr mutex) { + mutex->unlock(); + }); + + if (alreadyLocked > it && alreadyLocked != objectMutexes.end()) { + (*alreadyLocked)->unlock(); + } + + // now do a blocking lock on what we couldn't lock + (*it)->lock(); + // mark that we have already locked this one + // if there are more mutexes than this one, we will go back to locking everything + alreadyLocked = it; + break; } - // now do a blocking lock on what we couldn't lock - uv_mutex_lock(*it); - // mark that we have already locked this one - // if there are more mutexes than this one, we will go back to locking everything - alreadyLocked = it; - break; } - } - } while(it != objectMutexes.end()); -} - -void LockMasterImpl::Unlock(bool releaseMutexes) { - // Get the mutexes but don't decrement their use count until after we've - // unlocked them all. - std::vector objectMutexes = GetMutexes(0); + } while (it != objectMutexes.end()); + } - std::for_each(objectMutexes.begin(), objectMutexes.end(), uv_mutex_unlock); + void LockMasterImpl::Unlock(bool releaseMutexes) { + // Get the mutexes but don't decrement their use count until after we've + // unlocked them all. 
+ std::vector> objectMutexes = GetMutexes(0); - GetMutexes(releaseMutexes * -1); -} + std::for_each(objectMutexes.begin(), objectMutexes.end(), [](std::shared_ptr mutex) { + mutex->unlock(); + }); -LockMaster::Diagnostics LockMasterImpl::GetDiagnostics() { - LockMaster::Diagnostics diagnostics; - uv_mutex_lock(&LockMasterImpl::mapMutex); - diagnostics.storedMutexesCount = mutexes.size(); - uv_mutex_unlock(&LockMasterImpl::mapMutex); - return diagnostics; -} + GetMutexes(releaseMutexes * -1); + } -// LockMaster + // LockMaster -void LockMaster::ConstructorImpl() { - impl = new LockMasterImpl(); -} + void LockMaster::ConstructorImpl() { + impl = new LockMasterImpl(); + } -void LockMaster::DestructorImpl() { - delete impl; -} + void LockMaster::DestructorImpl() { + delete impl; + } -void LockMaster::ObjectToLock(const void *objectToLock) { - impl->ObjectToLock(objectToLock); -} + void LockMaster::ObjectToLock(const void *objectToLock) { + impl->ObjectToLock(objectToLock); + } -void LockMaster::ObjectsToLockAdded() { - impl->Lock(true); -} + void LockMaster::ObjectsToLockAdded() { + impl->Lock(true); + } -LockMaster::Diagnostics LockMaster::GetDiagnostics() { - return LockMasterImpl::GetDiagnostics(); -} + // LockMaster::TemporaryUnlock -// LockMaster::TemporaryUnlock + void LockMaster::TemporaryUnlock::ConstructorImpl() { + impl = LockMasterImpl::CurrentLockMasterImpl(); + if (impl) { + impl->Unlock(false); + } + } -void LockMaster::TemporaryUnlock::ConstructorImpl() { - impl = LockMasterImpl::CurrentLockMasterImpl(); - if(impl) { - impl->Unlock(false); + void LockMaster::TemporaryUnlock::DestructorImpl() { + impl->Lock(false); } -} -void LockMaster::TemporaryUnlock::DestructorImpl() { - impl->Lock(false); } - -LockMaster::Status LockMaster::status = LockMaster::Disabled; diff --git a/generate/templates/manual/src/nodegit_wrapper.cc b/generate/templates/manual/src/nodegit_wrapper.cc index 26ead60dac..a790d7bc3c 100644 --- 
a/generate/templates/manual/src/nodegit_wrapper.cc +++ b/generate/templates/manual/src/nodegit_wrapper.cc @@ -1,5 +1,7 @@ template -NodeGitWrapper::NodeGitWrapper(typename Traits::cType *raw, bool selfFreeing, v8::Local owner) { +NodeGitWrapper::NodeGitWrapper(typename Traits::cType *raw, bool selfFreeing, v8::Local owner) + : nodegitContext(nodegit::Context::GetCurrentContext()) { + nodegitContext->LinkTrackerList(this); if (Traits::isSingleton) { ReferenceCounter::incrementCountForPointer((void *)raw); this->raw = raw; @@ -19,6 +21,7 @@ NodeGitWrapper::NodeGitWrapper(typename Traits::cType *raw, bool selfFre Traits::duplicate(&this->raw, raw); selfFreeing = true; } else { + SetNativeOwners(owner); this->owner.Reset(owner); this->raw = raw; } @@ -35,7 +38,8 @@ NodeGitWrapper::NodeGitWrapper(typename Traits::cType *raw, bool selfFre } template -NodeGitWrapper::NodeGitWrapper(const char *error) { +NodeGitWrapper::NodeGitWrapper(const char *error) + : nodegitContext(nodegit::Context::GetCurrentContext()) { selfFreeing = false; raw = NULL; Nan::ThrowError(error); @@ -43,11 +47,15 @@ NodeGitWrapper::NodeGitWrapper(const char *error) { template NodeGitWrapper::~NodeGitWrapper() { + Unlink(); if (Traits::isFreeable && selfFreeing) { Traits::free(raw); SelfFreeingInstanceCount--; raw = NULL; } + else if (!selfFreeing) { + --NonSelfFreeingConstructedCount; + } } template @@ -67,7 +75,7 @@ NAN_METHOD(NodeGitWrapper::JSNewFunction) { instance = new cppClass(static_cast( Local::Cast(info[0])->Value()), Nan::To(info[1]).FromJust(), - info.Length() >= 3 && !info[2].IsEmpty() && info[2]->IsObject() ? info[2]->ToObject() : Local() + info.Length() >= 3 && !info[2].IsEmpty() && info[2]->IsObject() ? 
Nan::To(info[2]).ToLocalChecked() : Local() ); } @@ -75,13 +83,44 @@ NAN_METHOD(NodeGitWrapper::JSNewFunction) { info.GetReturnValue().Set(info.This()); } +template +void NodeGitWrapper::SetNativeOwners(v8::Local owners) { + assert(owners->IsArray() || owners->IsObject()); + Nan::HandleScope scope; + std::unique_ptr< std::vector > trackerOwners = + std::make_unique< std::vector >(); + + if (owners->IsArray()) { + v8::Local context = Nan::GetCurrentContext(); + const v8::Local ownersArray = owners.As(); + const uint32_t numOwners = ownersArray->Length(); + + for (uint32_t i = 0; i < numOwners; ++i) { + v8::Local value = ownersArray->Get(context, i).ToLocalChecked(); + const v8::Local object = value.As(); + Nan::ObjectWrap *objectWrap = Nan::ObjectWrap::Unwrap(object); + trackerOwners->push_back(static_cast(objectWrap)); + } + } + else if (owners->IsObject()) { + Nan::ObjectWrap *objectWrap = Nan::ObjectWrap::Unwrap(owners); + trackerOwners->push_back(static_cast(objectWrap)); + } + + SetTrackerWrapOwners(std::move(trackerOwners)); +} + template v8::Local NodeGitWrapper::New(const typename Traits::cType *raw, bool selfFreeing, v8::Local owner) { Nan::EscapableHandleScope scope; Local argv[3] = { Nan::New((void *)raw), Nan::New(selfFreeing), owner }; + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); + Local constructor_template = nodegitContext->GetFromPersistent( + std::string(Traits::className()) + "::Template" + ).As(); return scope.Escape( Nan::NewInstance( - Nan::New(constructor_template), + constructor_template, owner.IsEmpty() ? 
2 : 3, // passing an empty handle as part of the arguments causes a crash argv ).ToLocalChecked()); @@ -98,13 +137,10 @@ void NodeGitWrapper::ClearValue() { } template -Nan::Persistent NodeGitWrapper::constructor_template; +thread_local int NodeGitWrapper::SelfFreeingInstanceCount; template -int NodeGitWrapper::SelfFreeingInstanceCount; - -template -int NodeGitWrapper::NonSelfFreeingConstructedCount; +thread_local int NodeGitWrapper::NonSelfFreeingConstructedCount; template NAN_METHOD(NodeGitWrapper::GetSelfFreeingInstanceCount) { @@ -121,3 +157,30 @@ void NodeGitWrapper::InitializeTemplate(v8::Local Nan::SetMethod(tpl, "getSelfFreeingInstanceCount", GetSelfFreeingInstanceCount); Nan::SetMethod(tpl, "getNonSelfFreeingConstructedCount", GetNonSelfFreeingConstructedCount); } + +template +void NodeGitWrapper::Reference() { + Ref(); + for (auto &i : referenceCallbacks) { + i.second(); + } +} + +template +void NodeGitWrapper::Unreference() { + Unref(); + for (auto &i : unreferenceCallbacks) { + i.second(); + } +} + +template +void NodeGitWrapper::AddReferenceCallbacks(size_t fieldIndex, std::function refCb, std::function unrefCb) { + referenceCallbacks[fieldIndex] = refCb; + unreferenceCallbacks[fieldIndex] = unrefCb; +} + +template +void NodeGitWrapper::SaveCleanupHandle(std::shared_ptr cleanupHandle) { + childCleanupVector.push_back(cleanupHandle); +} diff --git a/generate/templates/manual/src/promise_completion.cc b/generate/templates/manual/src/promise_completion.cc index fd34dfa770..d3e4f4426e 100644 --- a/generate/templates/manual/src/promise_completion.cc +++ b/generate/templates/manual/src/promise_completion.cc @@ -1,34 +1,46 @@ #include #include "../include/promise_completion.h" -Nan::Persistent PromiseCompletion::newFn; -Nan::Persistent PromiseCompletion::promiseFulfilled; -Nan::Persistent PromiseCompletion::promiseRejected; - // initializes the persistent handles for NAN_METHODs -void PromiseCompletion::InitializeComponent() { - v8::Local newTemplate = 
Nan::New(New); - newTemplate->InstanceTemplate()->SetInternalFieldCount(1); - newFn.Reset(newTemplate->GetFunction()); - - promiseFulfilled.Reset(Nan::New(PromiseFulfilled)->GetFunction()); - promiseRejected.Reset(Nan::New(PromiseRejected)->GetFunction()); +void PromiseCompletion::InitializeComponent(nodegit::Context *nodegitContext) { + Nan::HandleScope scope; + v8::Local nodegitExternal = Nan::New(nodegitContext); + v8::Local newTemplate = Nan::New(New, nodegitExternal); + newTemplate->InstanceTemplate()->SetInternalFieldCount(2); + + nodegitContext->SaveToPersistent( + "PromiseCompletion::Template", + Nan::GetFunction(newTemplate).ToLocalChecked() + ); + + v8::Local promiseFulfilled = Nan::GetFunction( + Nan::New(PromiseFulfilled, nodegitExternal) + ).ToLocalChecked(); + nodegitContext->SaveToPersistent("PromiseCompletion::PromiseFulfilled", promiseFulfilled); + + v8::Local promiseRejected = Nan::GetFunction( + Nan::New(PromiseRejected, nodegitExternal) + ).ToLocalChecked(); + nodegitContext->SaveToPersistent("PromiseCompletion::PromiseRejected", promiseRejected); } -bool PromiseCompletion::ForwardIfPromise(v8::Local result, AsyncBaton *baton, Callback callback) +bool PromiseCompletion::ForwardIfPromise(v8::Local result, nodegit::AsyncBaton *baton, Callback callback) { Nan::HandleScope scope; // check if the result is a promise if (!result.IsEmpty() && result->IsObject()) { - Nan::MaybeLocal maybeThenProp = Nan::Get(result->ToObject(), Nan::New("then").ToLocalChecked()); + Nan::MaybeLocal maybeThenProp = Nan::Get(Nan::To(result).ToLocalChecked(), Nan::New("then").ToLocalChecked()); if (!maybeThenProp.IsEmpty()) { v8::Local thenProp = maybeThenProp.ToLocalChecked(); if(thenProp->IsFunction()) { // we can be reasonably certain that the result is a promise // create a new v8 instance of PromiseCompletion - v8::Local object = Nan::NewInstance(Nan::New(newFn)).ToLocalChecked(); + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); + v8::Local 
constructor_template = nodegitContext->GetFromPersistent("PromiseCompletion::Template") + .As(); + v8::Local object = Nan::NewInstance(constructor_template).ToLocalChecked(); // set up the native PromiseCompletion object PromiseCompletion *promiseCompletion = ObjectWrap::Unwrap(object); @@ -50,13 +62,18 @@ NAN_METHOD(PromiseCompletion::New) { } // sets up a Promise to forward the promise result via the baton and callback -void PromiseCompletion::Setup(v8::Local thenFn, v8::Local result, AsyncBaton *baton, Callback callback) { +void PromiseCompletion::Setup(v8::Local thenFn, v8::Local result, nodegit::AsyncBaton *baton, Callback callback) { this->callback = callback; this->baton = baton; - v8::Local promise = result->ToObject(); + v8::Local promise = Nan::To(result).ToLocalChecked(); + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); v8::Local thisHandle = handle(); + v8::Local promiseFulfilled = nodegitContext->GetFromPersistent("PromiseCompletion::PromiseFulfilled") + .As(); + v8::Local promiseRejected = nodegitContext->GetFromPersistent("PromiseCompletion::PromiseRejected") + .As(); v8::Local argv[2] = { Bind(promiseFulfilled, thisHandle), @@ -64,21 +81,21 @@ void PromiseCompletion::Setup(v8::Local thenFn, v8::Local PromiseCompletion::Bind(Nan::Persistent &function, v8::Local object) { +v8::Local PromiseCompletion::Bind(v8::Local function, v8::Local object) { Nan::EscapableHandleScope scope; v8::Local bind = - Nan::Get(Nan::New(function), Nan::New("bind").ToLocalChecked()) + Nan::Get(function, Nan::New("bind").ToLocalChecked()) .ToLocalChecked().As(); v8::Local argv[1] = { object }; - return scope.Escape(bind->Call(Nan::New(function), 1, argv)); + return scope.Escape(Nan::Call(bind, Nan::To(function).ToLocalChecked(), 1, argv).ToLocalChecked()); } // calls the callback stored in the PromiseCompletion, passing the baton that @@ -90,7 +107,7 @@ void PromiseCompletion::CallCallback(bool isFulfilled, const Nan::FunctionCallba resultOfPromise 
= info[0]; } - PromiseCompletion *promiseCompletion = ObjectWrap::Unwrap(info.This()->ToObject()); + PromiseCompletion *promiseCompletion = ObjectWrap::Unwrap(Nan::To(info.This()).ToLocalChecked()); (*promiseCompletion->callback)(isFulfilled, promiseCompletion->baton, resultOfPromise); } diff --git a/generate/templates/manual/src/reference_counter.cc b/generate/templates/manual/src/reference_counter.cc index 1adc1df4b9..e3bc483a7a 100644 --- a/generate/templates/manual/src/reference_counter.cc +++ b/generate/templates/manual/src/reference_counter.cc @@ -1,7 +1,7 @@ #include "../include/reference_counter.h" void ReferenceCounter::incrementCountForPointer(void *ptr) { - LockMaster(true, &referenceCountByPointer); + nodegit::LockMaster lm(true, &referenceCountByPointer); if (referenceCountByPointer.find(ptr) == referenceCountByPointer.end()) { referenceCountByPointer[ptr] = 1; } else { @@ -10,7 +10,7 @@ void ReferenceCounter::incrementCountForPointer(void *ptr) { } unsigned long ReferenceCounter::decrementCountForPointer(void *ptr) { - LockMaster(true, &referenceCountByPointer); + nodegit::LockMaster lm(true, &referenceCountByPointer); unsigned long referenceCount = referenceCountByPointer[ptr]; if (referenceCount == 1) { referenceCountByPointer.erase(ptr); diff --git a/generate/templates/manual/src/str_array_converter.cc b/generate/templates/manual/src/str_array_converter.cc index c66f901c36..5d04c65622 100644 --- a/generate/templates/manual/src/str_array_converter.cc +++ b/generate/templates/manual/src/str_array_converter.cc @@ -9,15 +9,15 @@ using namespace v8; using namespace node; -git_strarray *StrArrayConverter::Convert(Local val) { - if (!val->BooleanValue()) { +git_strarray *StrArrayConverter::Convert(v8::Local val) { + if (!Nan::To(val).FromJust()) { return NULL; } else if (val->IsArray()) { - return ConvertArray(Array::Cast(*val)); + return ConvertArray(v8::Local::Cast(val)); } else if (val->IsString() || val->IsStringObject()) { - return 
ConvertString(val->ToString()); + return ConvertString(Nan::To(val).ToLocalChecked()); } else { return NULL; @@ -33,18 +33,18 @@ git_strarray * StrArrayConverter::AllocStrArray(const size_t count) { return result; } -git_strarray *StrArrayConverter::ConvertArray(Array *val) { +git_strarray *StrArrayConverter::ConvertArray(v8::Local val) { git_strarray *result = AllocStrArray(val->Length()); for(size_t i = 0; i < result->count; i++) { - Nan::Utf8String entry(val->Get(i)); + Nan::Utf8String entry(Nan::Get(val, i).ToLocalChecked()); result->strings[i] = strdup(*entry); } return result; } -git_strarray* StrArrayConverter::ConvertString(Local val) { +git_strarray* StrArrayConverter::ConvertString(v8::Local val) { char *strings[1]; Nan::Utf8String utf8String(val); @@ -62,3 +62,19 @@ git_strarray *StrArrayConverter::ConstructStrArray(int argc, char** argv) { return result; } + +void StrArrayConverter::ConvertInto(git_strarray *out, v8::Local val) { + out->count = val->Length(); + out->strings = (char**) malloc(out->count * sizeof(char*)); + for (uint32_t i = 0; i < out->count; ++i) { + Nan::Utf8String utf8String(Nan::Get(val, i).ToLocalChecked().As()); + out->strings[i] = strdup(*utf8String); + } +} + +void StrArrayConverter::ConvertInto(git_strarray *out, v8::Local val) { + Nan::Utf8String utf8String(val); + out->count = 1; + out->strings = (char**) malloc(out->count * sizeof(char*)); + out->strings[0] = strdup(*utf8String); +} diff --git a/generate/templates/manual/src/thread_pool.cc b/generate/templates/manual/src/thread_pool.cc index 7eadf0421e..4cd5c095a3 100644 --- a/generate/templates/manual/src/thread_pool.cc +++ b/generate/templates/manual/src/thread_pool.cc @@ -1,102 +1,808 @@ +#include +#include "../include/context.h" #include "../include/thread_pool.h" -ThreadPool::ThreadPool(int numberOfThreads, uv_loop_t *loop) { - uv_mutex_init(&workMutex); - uv_sem_init(&workSemaphore, 0); +#include +#include +#include +#include +#include +#include // Temporary workaround 
for LFS checkout. Code added to be reverted. - uv_async_init(loop, &loopAsync, RunLoopCallbacks); - loopAsync.data = this; - uv_unref((uv_handle_t *)&loopAsync); - uv_mutex_init(&loopMutex); +extern "C" { + #include +} + +using namespace std::placeholders; + +namespace nodegit { + class Executor { + public: + struct Task { + enum Type { SHUTDOWN, WORK }; + + Task(Type initType) + : type(initType) + {} + Task(const Task &) = delete; + Task(Task &&) = delete; + Task &operator=(const Task &) = delete; + Task &operator=(Task &&) = delete; + + // We must define a virtual destructor so that derived classes are castable + virtual ~Task() {} + + Type type; + }; + + struct ShutdownTask : Task { + ShutdownTask() + : Task(SHUTDOWN) + {} + }; + + struct WorkTask : Task { + WorkTask(ThreadPool::Callback initCallback, Nan::AsyncResource *asyncResource, Nan::Global *callbackErrorHandle) + : Task(WORK), asyncResource(asyncResource), callbackErrorHandle(callbackErrorHandle), callback(initCallback) + {} + + Nan::AsyncResource *asyncResource; + Nan::Global *callbackErrorHandle; + ThreadPool::Callback callback; + }; + + typedef std::function PostCallbackEventToOrchestratorFn; + typedef std::function PostCompletedEventToOrchestratorFn; + typedef std::function()> TakeNextTaskFn; + + struct Event { + enum Type { COMPLETED, CALLBACK_TYPE }; + Event(Type initType) + : type(initType) + {} + Event(const Event &) = delete; + Event(Event &&) = delete; + Event &operator=(const Event &) = delete; + Event &operator=(Event &&) = delete; + + Type type; + + // We must define a virtual destructor so that derived classes are castable + virtual ~Event() {} + }; + + struct CompletedEvent : Event { + CompletedEvent() + : Event(COMPLETED) + {} + }; + + struct CallbackEvent : Event { + CallbackEvent(ThreadPool::OnPostCallbackFn initCallback) + : Event(CALLBACK_TYPE), callback(initCallback) + {} + + // Temporary workaround for LFS checkout. Code modified to be reverted. 
+ // ThreadPool::Callback operator()(ThreadPool::QueueCallbackFn queueCb, ThreadPool::Callback completedCb) { + // return callback(queueCb, completedCb); + ThreadPool::Callback operator()(ThreadPool::QueueCallbackFn queueCb, ThreadPool::Callback completedCb, bool isThreaded) { + return callback(queueCb, completedCb, isThreaded); + } + + private: + ThreadPool::OnPostCallbackFn callback; + }; + + Executor( + PostCallbackEventToOrchestratorFn postCallbackEventToOrchestrator, + PostCompletedEventToOrchestratorFn postCompletedEventToOrchestrator, + TakeNextTaskFn takeNextTask, + nodegit::Context *context + ); + + void RunTaskLoop(); + + // Orchestrator needs to call this to ensure that the executor is done reading from + // the Orchestrator's memory + void WaitForThreadClose(); + + // Temporary workaround for LFS checkout. Code added to be reverted. + // Returns true if the task running spawned threads within libgit2 + bool IsGitThreaded() { return currentGitThreads > kInitialGitThreads; } + + static Nan::AsyncResource *GetCurrentAsyncResource(); + + static const nodegit::Context *GetCurrentContext(); - workInProgressCount = 0; + static Nan::Global *GetCurrentCallbackErrorHandle(); - for(int i=0; i *currentCallbackErrorHandle; + nodegit::Context *currentContext; + + // We need to populate the executor on every thread that libgit2 + // could make a callback on so that it can correctly queue callbacks + // in the correct javascript context + thread_local static Executor *executor; + thread_local static bool isExecutorThread; + PostCallbackEventToOrchestratorFn postCallbackEventToOrchestrator; + PostCompletedEventToOrchestratorFn postCompletedEventToOrchestrator; + TakeNextTaskFn takeNextTask; + std::thread thread; + + // Temporary workaround for LFS checkout. Code added to be reverted. + static constexpr int kInitialGitThreads {0}; + // Number of threads spawned internally by libgit2 to deal with + // the task of this Executor instance. Defaults to kInitialGitThreads. 
+ std::atomic currentGitThreads {kInitialGitThreads}; + }; + + Executor::Executor( + PostCallbackEventToOrchestratorFn postCallbackEventToOrchestrator, + PostCompletedEventToOrchestratorFn postCompletedEventToOrchestrator, + TakeNextTaskFn takeNextTask, + nodegit::Context *context + ) + : currentAsyncResource(nullptr), + currentCallbackErrorHandle(nullptr), + currentContext(context), + postCallbackEventToOrchestrator(postCallbackEventToOrchestrator), + postCompletedEventToOrchestrator(postCompletedEventToOrchestrator), + takeNextTask(takeNextTask), + thread(&Executor::RunTaskLoop, this) + {} + + void Executor::RunTaskLoop() { + // Set the thread local storage so that libgit2 can pick up the current executor + // for the thread. + isExecutorThread = true; + executor = this; + + for ( ; ; ) { + std::unique_ptr task = takeNextTask(); + if (task->type == Task::Type::SHUTDOWN) { + return; + } + + WorkTask *workTask = static_cast(task.get()); + + // Temporary workaround for LFS checkout. Code added to be reverted. 
+ currentGitThreads = kInitialGitThreads; + + currentAsyncResource = workTask->asyncResource; + currentCallbackErrorHandle = workTask->callbackErrorHandle; + workTask->callback(); + currentCallbackErrorHandle = nullptr; + currentAsyncResource = nullptr; + + postCompletedEventToOrchestrator(); + } } -} -void ThreadPool::QueueWork(Callback workCallback, Callback completionCallback, void *data) { - uv_mutex_lock(&workMutex); - // there is work on the thread pool - reference the handle so - // node doesn't terminate - uv_ref((uv_handle_t *)&loopAsync); - workQueue.push(Work(workCallback, completionCallback, data)); - workInProgressCount++; - uv_mutex_unlock(&workMutex); - uv_sem_post(&workSemaphore); -} + void Executor::WaitForThreadClose() { + thread.join(); + } -void ThreadPool::QueueLoopCallback(Callback callback, void *data, bool isWork) { - // push the callback into the queue - uv_mutex_lock(&loopMutex); - LoopCallback loopCallback(callback, data, isWork); - bool queueWasEmpty = loopQueue.empty(); - loopQueue.push(loopCallback); - // we only trigger RunLoopCallbacks via the loopAsync handle if the queue - // was empty. Otherwise, we depend on RunLoopCallbacks to re-trigger itself - if (queueWasEmpty) { - uv_async_send(&loopAsync); - } - uv_mutex_unlock(&loopMutex); -} + Nan::AsyncResource *Executor::GetCurrentAsyncResource() { + if (executor) { + return executor->currentAsyncResource; + } -void ThreadPool::ExecuteReverseCallback(Callback reverseCallback, void *data) { - QueueLoopCallback(reverseCallback, data, false); -} + // NOTE this should always be set when a libgit2 callback is running, + // so this case should not happen. 
+ return nullptr; + } -void ThreadPool::RunEventQueue(void *threadPool) { - static_cast(threadPool)->RunEventQueue(); -} + const nodegit::Context *Executor::GetCurrentContext() { + if (executor) { + return executor->currentContext; + } -void ThreadPool::RunEventQueue() { - for ( ; ; ) { - // wait until there is work to do - uv_sem_wait(&workSemaphore); - uv_mutex_lock(&workMutex); - // the semaphore should guarantee that queue is not empty - Work work = workQueue.front(); - workQueue.pop(); - uv_mutex_unlock(&workMutex); + // NOTE this should always be set when a libgit2 callback is running, + // so this case should not happen. + return nullptr; + } - // perform the queued work - (*work.workCallback)(work.data); + Nan::Global *Executor::GetCurrentCallbackErrorHandle() { + if (executor) { + return executor->currentCallbackErrorHandle; + } - // schedule the completion callback on the loop - QueueLoopCallback(work.completionCallback, work.data, true); + // NOTE this should always be set when a libgit2 callback is running, + // so this case should not happen. + return nullptr; } -} -void ThreadPool::RunLoopCallbacks(uv_async_t* handle) { - static_cast(handle->data)->RunLoopCallbacks(); -} + void Executor::PostCallbackEvent(ThreadPool::OnPostCallbackFn onPostCallback) { + if (executor) { + executor->postCallbackEventToOrchestrator(onPostCallback); + } + } + + void *Executor::RetrieveTLSForLibgit2ChildThread() { + // Temporary workaround for LFS checkout. Code added to be reverted. + ++Executor::executor->currentGitThreads; + return Executor::executor; + } + + void Executor::SetTLSForLibgit2ChildThread(void *vexecutor) { + Executor::executor = static_cast(vexecutor); + } + + void Executor::TeardownTLSOnLibgit2ChildThread() { + if (!isExecutorThread) { + // Temporary workaround for LFS checkout. Code added to be reverted. 
+ --Executor::executor->currentGitThreads; + Executor::executor = nullptr; + } + } + + thread_local Executor *Executor::executor = nullptr; + thread_local bool Executor::isExecutorThread = false; + + class Orchestrator { + public: + struct Job { + enum Type { SHUTDOWN, ASYNC_WORK }; + Job(Type initType) + : type(initType) + {} + Job(const Job &) = delete; + Job(Job &&) = delete; + Job &operator=(const Job &) = delete; + Job &operator=(Job &&) = delete; + + virtual ~Job() {} + + Type type; + }; + + struct ShutdownJob : Job { + ShutdownJob() + : Job(SHUTDOWN) + {} + }; + + struct AsyncWorkJob : Job { + AsyncWorkJob(nodegit::AsyncWorker *initWorker) + : Job(ASYNC_WORK), worker(initWorker) + {} + + nodegit::AsyncWorker *worker; + }; + + typedef std::function QueueCallbackOnJSThreadFn; + typedef std::function()> TakeNextJobFn; + + private: + class OrchestratorImpl { + public: + OrchestratorImpl( + QueueCallbackOnJSThreadFn queueCallbackOnJSThread, + TakeNextJobFn takeNextJob, + nodegit::Context *context + ); + + void RunJobLoop(); + + // The Executor will call this method to queue a CallbackEvent in Orchestrator's event loop + void PostCallbackEvent(ThreadPool::OnPostCallbackFn onPostCallback); + + // The Executor will call this method after completion its work. This should queue + // a CompletedEvent in Thread's event loop + void PostCompletedEvent(); + + // This will be used by Executor to take jobs that the Thread has picked up and run them. 
+ std::unique_ptr TakeNextTask(); + + // This is used to wait for the Orchestrator's thread to shutdown after signaling shutdown + void WaitForThreadClose(); + + private: + // The only thread safe way to pull events from executorEventsQueue + std::shared_ptr TakeEventFromExecutor(); + + void ScheduleWorkTaskOnExecutor(ThreadPool::Callback callback, Nan::AsyncResource *asyncResource, Nan::Global *callbackErrorHandle); + + void ScheduleShutdownTaskOnExecutor(); + + std::condition_variable taskCondition; + std::unique_ptr taskMutex; + + std::queue> executorEventsQueue; + std::unique_ptr executorEventsMutex; + std::condition_variable executorEventsCondition; + + QueueCallbackOnJSThreadFn queueCallbackOnJSThread; + TakeNextJobFn takeNextJob; + std::unique_ptr task; + std::thread thread; + Executor executor; + }; + + std::unique_ptr impl; + + public: + Orchestrator( + QueueCallbackOnJSThreadFn queueCallbackOnJSThread, + TakeNextJobFn takeNextJob, + nodegit::Context *context + ); + + void WaitForThreadClose(); + }; + + Orchestrator::OrchestratorImpl::OrchestratorImpl( + QueueCallbackOnJSThreadFn queueCallbackOnJSThread, + TakeNextJobFn takeNextJob, + nodegit::Context *context + ) + : taskMutex(new std::mutex), + executorEventsMutex(new std::mutex), + queueCallbackOnJSThread(queueCallbackOnJSThread), + takeNextJob(takeNextJob), + task(nullptr), + thread(&Orchestrator::OrchestratorImpl::RunJobLoop, this), + executor( + std::bind(&Orchestrator::OrchestratorImpl::PostCallbackEvent, this, _1), + std::bind(&Orchestrator::OrchestratorImpl::PostCompletedEvent, this), + std::bind(&Orchestrator::OrchestratorImpl::TakeNextTask, this), + context + ) + {} + + void Orchestrator::OrchestratorImpl::RunJobLoop() { + for ( ; ; ) { + auto job = takeNextJob(); + switch (job->type) { + case Job::Type::SHUTDOWN: { + ScheduleShutdownTaskOnExecutor(); + executor.WaitForThreadClose(); + return; + } + + case Job::Type::ASYNC_WORK: { + std::shared_ptr asyncWorkJob = std::static_pointer_cast(job); + 
nodegit::AsyncWorker *worker = asyncWorkJob->worker; + // We lock at this level, because we temporarily unlock the lock master + // when a callback is fired. We need to be on the same thread to ensure + // the same thread that acquired the locks also releases them + nodegit::LockMaster lock = worker->AcquireLocks(); + ScheduleWorkTaskOnExecutor(std::bind(&nodegit::AsyncWorker::Execute, worker), worker->GetAsyncResource(), worker->GetCallbackErrorHandle()); + for ( ; ; ) { + std::shared_ptr event = TakeEventFromExecutor(); + if (event->type == Executor::Event::Type::COMPLETED) { + break; + } + + // We must have received a callback from libgit2 + auto callbackEvent = std::static_pointer_cast(event); + std::shared_ptr callbackMutex(new std::mutex); + std::shared_ptr callbackCondition(new std::condition_variable); + bool hasCompleted = false; + + // Temporary workaround for LFS checkout. Code removed to be reverted. + //LockMaster::TemporaryUnlock temporaryUnlock; + + // Temporary workaround for LFS checkout. Code added to be reverted. + bool isWorkerThreaded = executor.IsGitThreaded(); + ThreadPool::Callback callbackCompleted = []() {}; + if (!isWorkerThreaded) { + callbackCompleted = [callbackCondition, callbackMutex, &hasCompleted]() { + std::lock_guard lock(*callbackMutex); + hasCompleted = true; + callbackCondition->notify_one(); + }; + } + std::unique_ptr temporaryUnlock {nullptr}; + if (!isWorkerThreaded) { + temporaryUnlock = std::make_unique(); + } + + auto onCompletedCallback = (*callbackEvent)( + [this](ThreadPool::Callback callback, ThreadPool::Callback cancelCallback) { + queueCallbackOnJSThread(callback, cancelCallback, false); + }, + // Temporary workaround for LFS checkout. Code modified to be reverted. 
+ /* + [callbackCondition, callbackMutex, &hasCompleted]() { + std::lock_guard lock(*callbackMutex); + hasCompleted = true; + callbackCondition->notify_one(); + } + */ + callbackCompleted, + isWorkerThreaded + ); + + // Temporary workaround for LFS checkout. Code modified to be reverted. + if (!isWorkerThreaded) { + std::unique_lock lock(*callbackMutex); + while (!hasCompleted) callbackCondition->wait(lock); + onCompletedCallback(); + } + } + + queueCallbackOnJSThread( + [worker]() { + worker->WorkComplete(); + worker->Destroy(); + }, + [worker]() { + worker->Cancel(); + worker->WorkComplete(); + worker->Destroy(); + }, + true + ); + } + } + } + } + + // TODO add a cancel callback to `OnPostCallbackFn` which can be used on nodegit terminate + void Orchestrator::OrchestratorImpl::PostCallbackEvent(ThreadPool::OnPostCallbackFn onPostCallback) { + std::lock_guard lock(*executorEventsMutex); + std::shared_ptr callbackEvent(new Executor::CallbackEvent(onPostCallback)); + executorEventsQueue.push(callbackEvent); + executorEventsCondition.notify_one(); + } + + void Orchestrator::OrchestratorImpl::PostCompletedEvent() { + std::lock_guard lock(*executorEventsMutex); + std::shared_ptr completedEvent(new Executor::CompletedEvent); + executorEventsQueue.push(completedEvent); + executorEventsCondition.notify_one(); + } + + std::shared_ptr Orchestrator::OrchestratorImpl::TakeEventFromExecutor() { + std::unique_lock lock(*executorEventsMutex); + while (executorEventsQueue.empty()) executorEventsCondition.wait(lock); + std::shared_ptr executorEvent = executorEventsQueue.front(); + executorEventsQueue.pop(); + return executorEvent; + } + + void Orchestrator::OrchestratorImpl::ScheduleShutdownTaskOnExecutor() { + std::lock_guard lock(*taskMutex); + task.reset(new Executor::ShutdownTask); + taskCondition.notify_one(); + } + + void Orchestrator::OrchestratorImpl::ScheduleWorkTaskOnExecutor(ThreadPool::Callback callback, Nan::AsyncResource *asyncResource, Nan::Global 
*callbackErrorHandle) { + std::lock_guard lock(*taskMutex); + task.reset(new Executor::WorkTask(callback, asyncResource, callbackErrorHandle)); + taskCondition.notify_one(); + } + + std::unique_ptr Orchestrator::OrchestratorImpl::TakeNextTask() { + std::unique_lock lock(*taskMutex); + while (!task) taskCondition.wait(lock); + return std::move(task); + } + + void Orchestrator::OrchestratorImpl::WaitForThreadClose() { + thread.join(); + } + + Orchestrator::Orchestrator( + QueueCallbackOnJSThreadFn queueCallbackOnJSThread, + TakeNextJobFn takeNextJob, + nodegit::Context *context + ) + : impl(new OrchestratorImpl(queueCallbackOnJSThread, takeNextJob, context)) + {} + + void Orchestrator::WaitForThreadClose() { + impl->WaitForThreadClose(); + } + + class ThreadPoolImpl { + public: + ThreadPoolImpl(int numberOfThreads, uv_loop_t *loop, nodegit::Context *context); + + void QueueWorker(nodegit::AsyncWorker *worker); + + std::shared_ptr TakeNextJob(); + + void QueueCallbackOnJSThread(ThreadPool::Callback callback, ThreadPool::Callback cancelCallback, bool isWork); + + static void RunLoopCallbacks(uv_async_t *handle); + + void Shutdown(std::unique_ptr cleanupHandle); + + struct AsyncCallbackData { + AsyncCallbackData(ThreadPoolImpl *pool) + : pool(pool) + {} + + std::unique_ptr cleanupHandle; + ThreadPoolImpl *pool; + }; + + private: + bool isMarkedForDeletion; + + struct JSThreadCallback { + JSThreadCallback(ThreadPool::Callback callback, ThreadPool::Callback cancelCallback, bool isWork) + : isWork(isWork), callback(callback), cancelCallback(cancelCallback) + {} + + JSThreadCallback() + : isWork(false), callback(nullptr), cancelCallback(nullptr) + {} + + void performCallback() { + callback(); + } + + void cancel() { + cancelCallback(); + } + + bool isWork; + + private: + ThreadPool::Callback callback; + ThreadPool::Callback cancelCallback; + }; + + void RunLoopCallbacks(); + + std::queue> orchestratorJobQueue; + std::unique_ptr orchestratorJobMutex; + 
std::condition_variable orchestratorJobCondition; + size_t workInProgressCount; + + // completion and async callbacks to be performed on the loop + std::queue jsThreadCallbackQueue; + std::unique_ptr jsThreadCallbackMutex; + uv_async_t jsThreadCallbackAsync; + + std::vector orchestrators; + }; + + // context required to be passed to Orchestrators, but ThreadPoolImpl doesn't need to keep it + ThreadPoolImpl::ThreadPoolImpl(int numberOfThreads, uv_loop_t *loop, nodegit::Context *context) + : isMarkedForDeletion(false), + orchestratorJobMutex(new std::mutex), + jsThreadCallbackMutex(new std::mutex) + { + uv_async_init(loop, &jsThreadCallbackAsync, RunLoopCallbacks); + jsThreadCallbackAsync.data = new AsyncCallbackData(this); + uv_unref((uv_handle_t *)&jsThreadCallbackAsync); + + workInProgressCount = 0; + + for (int i = 0; i < numberOfThreads; i++) { + orchestrators.emplace_back( + std::bind(&ThreadPoolImpl::QueueCallbackOnJSThread, this, _1, _2, _3), + std::bind(&ThreadPoolImpl::TakeNextJob, this), + context + ); + } + } + + void ThreadPoolImpl::QueueWorker(nodegit::AsyncWorker *worker) { + std::lock_guard lock(*orchestratorJobMutex); + // there is work on the thread pool - reference the handle so + // node doesn't terminate + uv_ref((uv_handle_t *)&jsThreadCallbackAsync); + orchestratorJobQueue.emplace(new Orchestrator::AsyncWorkJob(worker)); + workInProgressCount++; + orchestratorJobCondition.notify_one(); + } + + std::shared_ptr ThreadPoolImpl::TakeNextJob() { + std::unique_lock lock(*orchestratorJobMutex); + while (orchestratorJobQueue.empty()) orchestratorJobCondition.wait(lock); + auto orchestratorJob = orchestratorJobQueue.front(); + + // When the thread pool is shutting down, the thread pool will drain the work queue and replace it with + // a single shared_ptr to a shutdown job, so don't pop the queue when we're shutting down so + // everyone gets the signal + if (orchestratorJob->type != Orchestrator::Job::Type::SHUTDOWN) { + orchestratorJobQueue.pop(); + } 
+ + return orchestratorJob; + } -void ThreadPool::RunLoopCallbacks() { - // get the next callback to run - uv_mutex_lock(&loopMutex); - LoopCallback loopCallback = loopQueue.front(); - uv_mutex_unlock(&loopMutex); + void ThreadPoolImpl::QueueCallbackOnJSThread(ThreadPool::Callback callback, ThreadPool::Callback cancelCallback, bool isWork) { + std::unique_lock lock(*jsThreadCallbackMutex); + // When the threadpool is shutting down, we want to free up the executors to also shutdown + // that means that we need to cancel all non-work callbacks as soon as we see them and + // we know that we are shutting down + if (isMarkedForDeletion && !isWork) { + // we don't know how long the cancelCallback will take, and it certainly doesn't need the lock + // while we're running it, so unlock it immediately. + lock.unlock(); + cancelCallback(); + return; + } - // perform the queued loop callback - (*loopCallback.callback)(loopCallback.data); + bool queueWasEmpty = jsThreadCallbackQueue.empty(); + jsThreadCallbackQueue.emplace(callback, cancelCallback, isWork); + // we only trigger RunLoopCallbacks via the jsThreadCallbackAsync handle if the queue + // was empty. 
Otherwise, we depend on RunLoopCallbacks to re-trigger itself + if (queueWasEmpty) { + uv_async_send(&jsThreadCallbackAsync); + } + } - // pop the queue, and if necessary, re-trigger RunLoopCallbacks - uv_mutex_lock(&loopMutex); - loopQueue.pop(); - if (!loopQueue.empty()) { - uv_async_send(&loopAsync); + void ThreadPoolImpl::RunLoopCallbacks(uv_async_t* handle) { + auto asyncCallbackData = static_cast(handle->data); + if (asyncCallbackData->pool) { + asyncCallbackData->pool->RunLoopCallbacks(); + } } - uv_mutex_unlock(&loopMutex); - // if there is no ongoing work / completion processing, node doesn't need - // to be prevented from terminating - if (loopCallback.isWork) { - uv_mutex_lock(&workMutex); - workInProgressCount --; - if(!workInProgressCount) { - uv_unref((uv_handle_t *)&loopAsync); + // NOTE this should theoretically never be triggered during a cleanup operation + void ThreadPoolImpl::RunLoopCallbacks() { + Nan::HandleScope scope; + v8::Local context = Nan::GetCurrentContext(); + node::CallbackScope callbackScope(context->GetIsolate(), Nan::New(), {0, 0}); + + std::unique_lock lock(*jsThreadCallbackMutex); + // get the next callback to run + JSThreadCallback jsThreadCallback = jsThreadCallbackQueue.front(); + jsThreadCallbackQueue.pop(); + + lock.unlock(); + jsThreadCallback.performCallback(); + lock.lock(); + + if (!jsThreadCallbackQueue.empty()) { + uv_async_send(&jsThreadCallbackAsync); + } + + // if there is no ongoing work / completion processing, node doesn't need + // to be prevented from terminating + if (jsThreadCallback.isWork) { + std::lock_guard orchestratorLock(*orchestratorJobMutex); + workInProgressCount--; + if (!workInProgressCount) { + uv_unref((uv_handle_t *)&jsThreadCallbackAsync); + } + } + } + + void ThreadPoolImpl::Shutdown(std::unique_ptr cleanupHandle) { + std::queue> cancelledJobs; + std::queue cancelledCallbacks; + { + std::unique_lock orchestratorLock(*orchestratorJobMutex, std::defer_lock); + std::unique_lock 
jsThreadLock(*jsThreadCallbackMutex, std::defer_lock); + std::lock(orchestratorLock, jsThreadLock); + + // Once we've marked for deletion, we will start cancelling all callbacks + // when an attempt to queue a callback is made + isMarkedForDeletion = true; + // We want to grab all of the jobs that have been queued and run their cancel routines + // so that we can clean up their resources + orchestratorJobQueue.swap(cancelledJobs); + // We also want to grab all callbacks that have been queued so that we can + // run their cancel routines, this will help terminate the async workers + // that are currently being executed complete so that the threads + // running them can exit cleanly + jsThreadCallbackQueue.swap(cancelledCallbacks); + // Pushing a ShutdownJob into the queue will instruct all threads + // to start their shutdown process when they see the job is available. + orchestratorJobQueue.emplace(new Orchestrator::ShutdownJob); + + if (workInProgressCount) { + // unref the jsThreadCallback for all work in progress + // it will not be used after this function has completed + while (workInProgressCount--) { + uv_unref((uv_handle_t *)&jsThreadCallbackAsync); + } + } + + orchestratorJobCondition.notify_all(); + } + + Nan::HandleScope scope; + v8::Local context = Nan::GetCurrentContext(); + node::CallbackScope callbackScope(context->GetIsolate(), Nan::New(), {0, 0}); + + while (cancelledJobs.size()) { + std::shared_ptr cancelledJob = cancelledJobs.front(); + std::shared_ptr asyncWorkJob = std::static_pointer_cast(cancelledJob); + + asyncWorkJob->worker->Cancel(); + asyncWorkJob->worker->WorkComplete(); + asyncWorkJob->worker->Destroy(); + + cancelledJobs.pop(); } - uv_mutex_unlock(&workMutex); + + // We need to cancel all callbacks that were scheduled before the shutdown + // request went through. 
This will help finish any work any currently operating + // executors are undertaking + while (cancelledCallbacks.size()) { + JSThreadCallback cancelledCallback = cancelledCallbacks.front(); + cancelledCallback.cancel(); + cancelledCallbacks.pop(); + } + + std::for_each(orchestrators.begin(), orchestrators.end(), [](Orchestrator &orchestrator) { + orchestrator.WaitForThreadClose(); + }); + + // After we have completed waiting for all threads to close + // we will need to cleanup the rest of the completion callbacks + // from workers that were still running when the shutdown signal + // was sent + std::lock_guard jsThreadLock(*jsThreadCallbackMutex); + while (jsThreadCallbackQueue.size()) { + JSThreadCallback jsThreadCallback = jsThreadCallbackQueue.front(); + jsThreadCallback.cancel(); + jsThreadCallbackQueue.pop(); + } + + AsyncCallbackData *asyncCallbackData = static_cast(jsThreadCallbackAsync.data); + asyncCallbackData->cleanupHandle.swap(cleanupHandle); + asyncCallbackData->pool = nullptr; + + uv_close(reinterpret_cast(&jsThreadCallbackAsync), [](uv_handle_t *handle) { + auto asyncCallbackData = static_cast(handle->data); + delete asyncCallbackData; + }); + } + + ThreadPool::ThreadPool(int numberOfThreads, uv_loop_t *loop, nodegit::Context *context) + : impl(new ThreadPoolImpl(numberOfThreads, loop, context)) + {} + + ThreadPool::~ThreadPool() {} + + void ThreadPool::QueueWorker(nodegit::AsyncWorker *worker) { + impl->QueueWorker(worker); + } + + void ThreadPool::PostCallbackEvent(OnPostCallbackFn onPostCallback) { + Executor::PostCallbackEvent(onPostCallback); + } + + Nan::AsyncResource *ThreadPool::GetCurrentAsyncResource() { + return Executor::GetCurrentAsyncResource(); + } + + const nodegit::Context *ThreadPool::GetCurrentContext() { + return Executor::GetCurrentContext(); + } + + Nan::Global *ThreadPool::GetCurrentCallbackErrorHandle() { + return Executor::GetCurrentCallbackErrorHandle(); + } + + void ThreadPool::Shutdown(std::unique_ptr cleanupHandle) { + 
impl->Shutdown(std::move(cleanupHandle)); + } + + void ThreadPool::InitializeGlobal() { + git_custom_tls_set_callbacks( + Executor::RetrieveTLSForLibgit2ChildThread, + Executor::SetTLSForLibgit2ChildThread, + Executor::TeardownTLSOnLibgit2ChildThread + ); } } diff --git a/generate/templates/manual/src/tracker_wrap.cc b/generate/templates/manual/src/tracker_wrap.cc new file mode 100644 index 0000000000..85034d88de --- /dev/null +++ b/generate/templates/manual/src/tracker_wrap.cc @@ -0,0 +1,238 @@ +#include "../include/tracker_wrap.h" + +#include +#include + +namespace { + /** + * \class TrackerWrapTreeNode + * + * Parents of a TrackerWrapTreeNode will be the nodes holding TrackerWrap objects that + * are owners of the TrackerWrap object that this node holds. The same way for its children. + */ + class TrackerWrapTreeNode + { + public: + TrackerWrapTreeNode(nodegit::TrackerWrap *trackerWrap) : m_trackerWrap(trackerWrap) {} + TrackerWrapTreeNode() = delete; + ~TrackerWrapTreeNode(); + TrackerWrapTreeNode(const TrackerWrapTreeNode &other) = delete; + TrackerWrapTreeNode(TrackerWrapTreeNode &&other) = delete; + TrackerWrapTreeNode& operator=(const TrackerWrapTreeNode &other) = delete; + TrackerWrapTreeNode& operator=(TrackerWrapTreeNode &&other) = delete; + + inline const std::unordered_set& Children() const; + inline nodegit::TrackerWrap* TrackerWrap(); + inline void AddChild(TrackerWrapTreeNode *child); + + private: + std::unordered_set m_children {}; + nodegit::TrackerWrap *m_trackerWrap {}; + }; + + /** + * TrackerWrapTreeNode::~TrackerWrapTreeNode() + * Frees the memory of the TrackerWrap pointer it holds. + */ + TrackerWrapTreeNode::~TrackerWrapTreeNode() { + delete m_trackerWrap; + } + + /** + * TrackerWrapTreeNode::Children() + * + * Returns a reference to the children nodes of this. 
+ */ + const std::unordered_set& TrackerWrapTreeNode::Children() const { + return m_children; + } + + /** + * TrackerWrapTreeNode::TrackerWrap() + * + * Returns a pointer to the node's TrackerWrap object. + */ + nodegit::TrackerWrap* TrackerWrapTreeNode::TrackerWrap() { + return m_trackerWrap; + } + + /** + * TrackerWrapTreeNode::AddChild() + */ + void TrackerWrapTreeNode::AddChild(TrackerWrapTreeNode *child) { + m_children.insert(child); + } + + /** + * \class TrackerWrapTrees + * + * Class containing a list of trees with nodes holding TrackerWrap objects. + * For a TrackerWrap object 'P' which owns another TrackerWrap object 'C', + * 'P' will be held in a node which will be the parent of the child node + * that holds 'C'. + * On destruction, nodes will be freed in a children-first way. + * + * NOTE: nodegit code previous to this change is prepared to manage an array of + * owners, so class TrackerWrapTrees considers multiple owners (parent nodes) too. + */ + class TrackerWrapTrees + { + public: + TrackerWrapTrees(nodegit::TrackerWrap::TrackerList *trackerList); + TrackerWrapTrees() = delete; + ~TrackerWrapTrees(); + TrackerWrapTrees(const TrackerWrapTrees &other) = delete; + TrackerWrapTrees(TrackerWrapTrees &&other) = delete; + TrackerWrapTrees& operator=(const TrackerWrapTrees &other) = delete; + TrackerWrapTrees& operator=(TrackerWrapTrees &&other) = delete; + + private: + void addNode(nodegit::TrackerWrap *trackerWrap); + void addParentNode(nodegit::TrackerWrap *owner, TrackerWrapTreeNode *child); + void deleteTree(TrackerWrapTreeNode *node); + void freeAllTreesChildrenFirst(); + + using TrackerWrapTreeNodeMap = std::unordered_map>; + + TrackerWrapTreeNodeMap m_mapTrackerWrapNode {}; + std::vector m_roots {}; + }; + + /** + * TrackerWrapTrees::TrackerWrapTrees(nodegit::TrackerWrap::TrackerList *trackerList) + * + * Unlinks items from trackerList and adds them to a tree. + * For each root (TrackerWrap item without owners), it adds a new tree root. 
+ * + * \param trackerList TrackerList pointer from which the TrackerWrapTrees object will be created. + */ + TrackerWrapTrees::TrackerWrapTrees(nodegit::TrackerWrap::TrackerList *trackerList) + { + nodegit::TrackerWrap *trackerWrap {}; + while ((trackerWrap = nodegit::TrackerWrap::UnlinkFirst(trackerList)) != nullptr) { + addNode(trackerWrap); + } + } + + /* + * TrackerWrapTrees::~TrackerWrapTrees + */ + TrackerWrapTrees::~TrackerWrapTrees() { + freeAllTreesChildrenFirst(); + } + + /** + * TrackerWrapTrees::addNode + * + * \param trackerWrap pointer to the TrackerWrap object to add as a node in a tree. + */ + void TrackerWrapTrees::addNode(nodegit::TrackerWrap *trackerWrap) { + // add trackerWrap as a node + // NOTE: 'emplace' will create a temporal TrackerWrapTreeNode and will + // free it if trackerWrap already exists as a key. To prevent freeing + // the node at this moment we have to find it first. + auto addedNodeIter = m_mapTrackerWrapNode.find(trackerWrap); + if (addedNodeIter == m_mapTrackerWrapNode.end()) { + addedNodeIter = m_mapTrackerWrapNode.emplace(std::make_pair( + trackerWrap, std::make_unique(trackerWrap))).first; + } + TrackerWrapTreeNode *addedNode = addedNodeIter->second.get(); + + // if trackerWrap has no owners, add it as a root node + const std::vector *owners = trackerWrap->GetTrackerWrapOwners(); + if (owners == nullptr) { + m_roots.push_back(addedNode); + } + else { + // add addedNode's parents and link them with this child + for (nodegit::TrackerWrap *owner : *owners) { + addParentNode(owner, addedNode); + } + } + } + + /** + * TrackerWrapTrees::addParentNode + * + * \param owner TrackerWrap pointer for the new parent node to add. + * \param child TrackerWrapTreeNode pointer to be the child node of the new parent node to add. 
+ */ + void TrackerWrapTrees::addParentNode(nodegit::TrackerWrap *owner, TrackerWrapTreeNode *child) + { + // adds a new parent node (holding the owner) + // NOTE: 'emplace' will create a temporal TrackerWrapTreeNode and will + // free it if trackerWrap already exists as a key. To prevent freeing + // the node at this moment we have to find it first. + auto addedParentNodeIter = m_mapTrackerWrapNode.find(owner); + if (addedParentNodeIter == m_mapTrackerWrapNode.end()) { + addedParentNodeIter = m_mapTrackerWrapNode.emplace(std::make_pair( + owner, std::make_unique(owner))).first; + } + TrackerWrapTreeNode *addedParentNode = addedParentNodeIter->second.get(); + + // links parent to child + addedParentNode->AddChild(child); + } + + /** + * TrackerWrapTrees::deleteTree + * + * Deletes the tree from the node passed as a parameter + * in a children-first way and recursively. + * + * \param node node from where to delete all its children and itself. + */ + void TrackerWrapTrees::deleteTree(TrackerWrapTreeNode *node) + { + // delete all node's children first + const std::unordered_set &children = node->Children(); + for (TrackerWrapTreeNode *child : children) { + // check that child hasn't been removed previously by another parent + if (m_mapTrackerWrapNode.find(child->TrackerWrap()) != m_mapTrackerWrapNode.end()) { + deleteTree(child); + } + } + + // then deletes itself from the container, which will + // actually free 'node' and the TrackerWrap object it holds + m_mapTrackerWrapNode.erase(node->TrackerWrap()); + } + + /** + * TrackerWrapTrees::freeAllTreesChildrenFirst + * + * Deletes all the trees held, in a children-first way. + */ + void TrackerWrapTrees::freeAllTreesChildrenFirst() { + for (TrackerWrapTreeNode *root : m_roots) { + deleteTree(root); + } + m_roots.clear(); + } +} // end anonymous namespace + + +namespace nodegit { + TrackerWrap* TrackerWrap::UnlinkFirst(TrackerList *listStart) { + assert(listStart != nullptr); + return listStart->m_next == nullptr ? 
nullptr : listStart->m_next->Unlink(); + } + + int TrackerWrap::SizeFromList(TrackerList *listStart) { + assert(listStart != nullptr); + TrackerList *t {listStart}; + int count {0}; + while (t->m_next != nullptr) { + ++count; + t = t->m_next; + } + return count; + } + + void TrackerWrap::DeleteFromList(TrackerList *listStart) { + assert(listStart != nullptr); + // creates an object TrackerWrapTrees, which will free + // the nodes of its trees in a children-first way + TrackerWrapTrees trackerWrapTrees(listStart); + } +} \ No newline at end of file diff --git a/generate/templates/manual/src/v8_helpers.cc b/generate/templates/manual/src/v8_helpers.cc new file mode 100644 index 0000000000..bd97fba34e --- /dev/null +++ b/generate/templates/manual/src/v8_helpers.cc @@ -0,0 +1,19 @@ +#include "../include/v8_helpers.h" + +namespace nodegit { + v8::Local safeGetField(v8::Local &containerObject, std::string field) { + auto maybeFieldName = Nan::New(field); + if (maybeFieldName.IsEmpty()) { + v8::Local emptyResult; + return emptyResult; + } + + auto maybeRetrievedField = Nan::Get(containerObject, maybeFieldName.ToLocalChecked()); + if (maybeRetrievedField.IsEmpty()) { + v8::Local emptyResult; + return emptyResult; + } + + return maybeRetrievedField.ToLocalChecked(); + } +} diff --git a/generate/templates/manual/src/wrapper.cc b/generate/templates/manual/src/wrapper.cc index ffd9bc5843..3bad23c7d4 100644 --- a/generate/templates/manual/src/wrapper.cc +++ b/generate/templates/manual/src/wrapper.cc @@ -16,18 +16,20 @@ Wrapper::Wrapper(void *raw) { this->raw = raw; } -void Wrapper::InitializeComponent(Local target) { +void Wrapper::InitializeComponent(Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; - Local tpl = Nan::New(JSNewFunction); + Local nodegitExternal = Nan::New(nodegitContext); + Local tpl = Nan::New(JSNewFunction, nodegitExternal); - tpl->InstanceTemplate()->SetInternalFieldCount(1); + tpl->InstanceTemplate()->SetInternalFieldCount(2); 
tpl->SetClassName(Nan::New("Wrapper").ToLocalChecked()); - Nan::SetPrototypeMethod(tpl, "toBuffer", ToBuffer); + Nan::SetPrototypeMethod(tpl, "toBuffer", ToBuffer, nodegitExternal); - constructor_template.Reset(tpl); - Nan::Set(target, Nan::New("Wrapper").ToLocalChecked(), Nan::GetFunction(tpl).ToLocalChecked()); + Local constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); + nodegitContext->SaveToPersistent("Wrapper::Template", constructor_template); + Nan::Set(target, Nan::New("Wrapper").ToLocalChecked(), constructor_template); } NAN_METHOD(Wrapper::JSNewFunction) { @@ -47,8 +49,9 @@ Local Wrapper::New(const void *raw) { Local argv[1] = { Nan::New((void *)raw) }; Local instance; - Local constructorHandle = Nan::New(constructor_template); - instance = Nan::NewInstance(Nan::GetFunction(constructorHandle).ToLocalChecked(), 1, argv).ToLocalChecked(); + nodegit::Context *nodegitContext = nodegit::Context::GetCurrentContext(); + Local constructor_template = nodegitContext->GetFromPersistent("Wrapper::Template").As(); + instance = Nan::NewInstance(constructor_template, 1, argv).ToLocalChecked(); return scope.Escape(instance); } @@ -75,6 +78,3 @@ NAN_METHOD(Wrapper::ToBuffer) { info.GetReturnValue().Set(nodeBuffer); } - - -Nan::Persistent Wrapper::constructor_template; diff --git a/generate/templates/manual/tree/get_all_filepaths.cc b/generate/templates/manual/tree/get_all_filepaths.cc new file mode 100644 index 0000000000..758383980a --- /dev/null +++ b/generate/templates/manual/tree/get_all_filepaths.cc @@ -0,0 +1,157 @@ + +namespace TreeFilepathsHelpers { + +int iterateTreePaths(git_repository *repo, git_tree *tree, std::vector *paths,std::string *buffer) { + size_t size = git_tree_entrycount(tree); + for (size_t i = 0; i < size; i++) { + const git_tree_entry *entry = git_tree_entry_byindex(tree, i); + const git_filemode_t filemode = git_tree_entry_filemode(entry); + if (filemode == GIT_FILEMODE_BLOB || filemode == GIT_FILEMODE_BLOB_EXECUTABLE) { + 
paths->push_back(*buffer + std::string(git_tree_entry_name(entry))); + } + else if (filemode == GIT_FILEMODE_TREE) { + git_tree *subtree; + int error = git_tree_lookup(&subtree, repo, git_tree_entry_id(entry)); + if (error == GIT_OK) { + size_t size = buffer->size(); + /* append the next entry to the path */ + buffer->append(git_tree_entry_name(entry)); + buffer->append("/"); + error = iterateTreePaths(repo, subtree, paths, buffer); + git_tree_free(subtree); + buffer->resize(size); + } + + if (error < 0 ) { + return error; + } + + } + } + return GIT_OK; +} + +} // end anonymous namespace + +NAN_METHOD(GitTree::GetAllFilepaths) +{ + if (!info[info.Length() - 1]->IsFunction()) { + return Nan::ThrowError("Callback is required and must be a Function."); + } + + GetAllFilepathsBaton* baton = new GetAllFilepathsBaton(); + + baton->error_code = GIT_OK; + baton->error = NULL; + baton->tree = Nan::ObjectWrap::Unwrap(info.This())->GetValue(); + baton->out = new std::vector; + baton->repo = git_tree_owner(baton->tree); + + Nan::Callback *callback = new Nan::Callback(Local::Cast(info[info.Length() - 1])); + std::map> cleanupHandles; + GetAllFilepathsWorker *worker = new GetAllFilepathsWorker(baton, callback, cleanupHandles); + worker->Reference("tree", info.This()); + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + nodegitContext->QueueWorker(worker); + + return; +} + +nodegit::LockMaster GitTree::GetAllFilepathsWorker::AcquireLocks() { + nodegit::LockMaster lockMaster(true, baton->tree, baton->repo); + return lockMaster; +} + +void GitTree::GetAllFilepathsWorker::Execute() +{ + std::string buffer; + buffer.reserve(4096); + baton->error_code = TreeFilepathsHelpers::iterateTreePaths(baton->repo, baton->tree, baton->out, &buffer); + if (baton->error_code != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); + } +} + +void GitTree::GetAllFilepathsWorker::HandleErrorCallback() { + if 
(baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + delete baton->out; + + delete baton; +} + +void GitTree::GetAllFilepathsWorker::HandleOKCallback() +{ + if (baton->error_code == GIT_OK) { + std::vector &paths = *(baton->out); + v8::Local result = Nan::New(paths.size()); + for (unsigned int i = 0; i < paths.size(); i++) { + Nan::Set(result, i, Nan::New(paths[i]).ToLocalChecked()); + } + + v8::Local argv[2] = {Nan::Null(), result}; + callback->Call(2, argv, async_resource); + } + else + { + if (baton->error) + { + Local err; + if (baton->error->message) { + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); + } else { + err = Nan::To(Nan::Error("Method getAllFilepaths has thrown an error.")).ToLocalChecked(); + } + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Tree.getAllFilepaths").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + if (baton->error->message) + { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + else if (baton->error_code < 0) + { + bool callbackFired = false; + if (!callbackErrorHandle.IsEmpty()) { + v8::Local maybeError = Nan::New(callbackErrorHandle); + if (!maybeError->IsNull() && !maybeError->IsUndefined()) { + v8::Local argv[1] = { + maybeError + }; + callback->Call(1, argv, async_resource); + callbackFired = true; + } + } + + if (!callbackFired) + { + Local err = Nan::To(Nan::Error("Method getAllFilepaths has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("Revwalk.getAllFilepaths").ToLocalChecked()); + Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); + } + } + else + { + callback->Call(0, NULL, 
async_resource); + } + } + + delete baton->out; + delete baton; +} diff --git a/generate/templates/partials/async_function.cc b/generate/templates/partials/async_function.cc index c4cdb115d0..d23ec7f609 100644 --- a/generate/templates/partials/async_function.cc +++ b/generate/templates/partials/async_function.cc @@ -2,11 +2,11 @@ {%partial doc .%} NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%partial guardArguments .%} - if (info.Length() == {{args|jsArgsCount}} || !info[{{args|jsArgsCount}}]->IsFunction()) { + if (!info[info.Length() - 1]->IsFunction()) { return Nan::ThrowError("Callback is required and must be a Function."); } - {{ cppFunctionName }}Baton* baton = new {{ cppFunctionName }}Baton; + {{ cppFunctionName }}Baton* baton = new {{ cppFunctionName }}Baton(); baton->error_code = GIT_OK; baton->error = NULL; @@ -17,6 +17,9 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%endif%} {%endeach%} + nodegit::Context *nodegitContext = reinterpret_cast(info.Data().As()->Value()); + std::map> cleanupHandles; + {%each args|argsInfo as arg %} {%if not arg.isReturn %} {%if arg.isSelf %} @@ -27,6 +30,7 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%if arg.payload.globalPayload %} globalPayload->{{ arg.name }} = NULL; {%else%} + // NOTE this is a dead path baton->{{ arg.payload.name }} = NULL; {%endif%} } @@ -35,6 +39,7 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%if arg.payload.globalPayload %} globalPayload->{{ arg.name }} = new Nan::Callback(info[{{ arg.jsArg }}].As()); {%else%} + // NOTE this is a dead path baton->{{ arg.payload.name }} = new Nan::Callback(info[{{ arg.jsArg }}].As()); {%endif%} } @@ -42,6 +47,45 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%if arg.globalPayload %} baton->{{ arg.name }} = globalPayload; {%endif%} + {% elsif arg.isStructType %} + {% if arg.isOptional %} + if (info[{{ arg.jsArg }}]->IsNull() || info[{{ arg.jsArg }}]->IsUndefined()) { + baton->{{ arg.name }} = nullptr; + 
} else + {% endif %} + {% if arg.cppClassName == 'Array' %} + { + v8::Local tempArray = v8::Local::Cast(info[{{ arg.jsArg }}]); + baton->{{ arg.name }} = ({{ arg.cType|unPointer }}*)malloc(sizeof({{ arg.cType|unPointer }}) * tempArray->Length()); + for (uint32_t i = 0; i < tempArray->Length(); ++i) { + auto conversionResult = Configurable{{ arg.arrayElementCppClassName }}::fromJavascript( + nodegitContext, + Nan::Get(tempArray, i).ToLocalChecked() + ); + + if (!conversionResult.result) { + // TODO free previously allocated memory + free(baton->{{ arg.name }}); + return Nan::ThrowError(Nan::New(conversionResult.error).ToLocalChecked()); + } + + auto convertedObject = conversionResult.result; + cleanupHandles[std::string("{{ arg.name }}") + std::to_string(i)] = convertedObject; + baton->{{ arg.name }}[i] = *convertedObject->GetValue(); + } + } + {% else %} + { + auto conversionResult = Configurable{{ arg.cppClassName }}::fromJavascript(nodegitContext, info[{{ arg.jsArg }}]); + if (!conversionResult.result) { + return Nan::ThrowError(Nan::New(conversionResult.error).ToLocalChecked()); + } + + auto convertedObject = conversionResult.result; + cleanupHandles["{{ arg.name }}"] = convertedObject; + baton->{{ arg.name }} = convertedObject->GetValue(); + } + {% endif %} {%elsif arg.name %} {%partial convertFromV8 arg%} {%if not arg.payloadFor %} @@ -55,43 +99,55 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { baton->{{arg.name}} = ({{ arg.cType }})malloc(sizeof({{ arg.cType|replace '*' '' }})); {%if arg.cppClassName == "GitBuf" %} baton->{{arg.name}}->ptr = NULL; - baton->{{arg.name}}->size = baton->{{arg.name}}->asize = 0; + baton->{{arg.name}}->size = baton->{{arg.name}}->reserved = 0; {%endif%} {%endif%} {%endeach%} - Nan::Callback *callback = new Nan::Callback(v8::Local::Cast(info[{{args|jsArgsCount}}])); - {{ cppFunctionName }}Worker *worker = new {{ cppFunctionName }}Worker(baton, callback); + Nan::Callback *callback = new 
Nan::Callback(v8::Local::Cast(info[info.Length() - 1])); + {{ cppFunctionName }}Worker *worker = new {{ cppFunctionName }}Worker(baton, callback, cleanupHandles); {%each args|argsInfo as arg %} {%if not arg.isReturn %} {%if arg.isSelf %} - worker->SaveToPersistent("{{ arg.name }}", info.This()); + worker->Reference<{{ arg.cppClassName }}>("{{ arg.name }}", info.This()); {%elsif not arg.isCallbackFunction %} - if (!info[{{ arg.jsArg }}]->IsUndefined() && !info[{{ arg.jsArg }}]->IsNull()) - worker->SaveToPersistent("{{ arg.name }}", info[{{ arg.jsArg }}]->ToObject()); + {%if arg.isUnwrappable %} + {% if arg.cppClassName == "Array" %} + if (info[{{ arg.jsArg }}]->IsArray()) { + worker->Reference<{{ arg.arrayElementCppClassName }}>("{{ arg.name }}", info[{{ arg.jsArg }}].As()); + } + {% else %} + worker->Reference<{{ arg.cppClassName }}>("{{ arg.name }}", info[{{ arg.jsArg }}]); + {% endif %} + {% else %} + worker->Reference("{{ arg.name }}", info[{{ arg.jsArg }}]); + {% endif %} {%endif%} {%endif%} {%endeach%} - AsyncLibgit2QueueWorker(worker); + nodegitContext->QueueWorker(worker); return; } -void {{ cppClassName }}::{{ cppFunctionName }}Worker::Execute() { - giterr_clear(); - - { - LockMaster lockMaster( - /*asyncAction: */true - {%each args|argsInfo as arg %} - {%if arg.cType|isPointer%} - {%if not arg.cType|isDoublePointer%} - ,baton->{{ arg.name }} - {%endif%} +nodegit::LockMaster {{ cppClassName }}::{{ cppFunctionName }}Worker::AcquireLocks() { + nodegit::LockMaster lockMaster( + /*asyncAction: */true + {%each args|argsInfo as arg %} + {%if arg.cType|isPointer%} + {%if not arg.cType|isDoublePointer%} + ,baton->{{ arg.name }} {%endif%} - {%endeach%} - ); + {%endif%} + {%endeach%} + ); + + return lockMaster; +} + +void {{ cppClassName }}::{{ cppFunctionName }}Worker::Execute() { + git_error_clear(); {%if .|hasReturnType %} {{ return.cType }} result = {{ cFunctionName }}( @@ -105,25 +161,94 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::Execute() { 
{%endeach%} ); - {%if return.isResultOrError %} + {% if return.isResultOrError %} baton->error_code = result; - if (result < GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); + if (result < GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); } - {%elsif return.isErrorCode %} + {% elsif return.isErrorCode %} baton->error_code = result; - if (result != GIT_OK && giterr_last() != NULL) { - baton->error = git_error_dup(giterr_last()); + if (result != GIT_OK && git_error_last()->klass != GIT_ERROR_NONE) { + baton->error = git_error_dup(git_error_last()); } - {%elsif not return.cType == 'void' %} + {%elsif return.cType != 'void' %} baton->result = result; {%endif%} +} + +void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleErrorCallback() { + if (!GetIsCancelled()) { + v8::Local err = Nan::To(Nan::Error(ErrorMessage())).ToLocalChecked(); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("{{ jsClassName }}.{{ jsFunctionName }}").ToLocalChecked()); + v8::Local argv[1] = { + err + }; + callback->Call(1, argv, async_resource); } + + if (baton->error) { + if (baton->error->message) { + free((void *)baton->error->message); + } + + free((void *)baton->error); + } + + {%each args|argsInfo as arg %} + {%if arg.shouldAlloc %} + {%if not arg.isCppClassStringOrArray %} + {%elsif arg | isOid %} + if (baton->{{ arg.name}}NeedsFree) { + baton->{{ arg.name}}NeedsFree = false; + free((void*)baton->{{ arg.name }}); + } + {%elsif arg.isCallbackFunction %} + {%if not arg.payload.globalPayload %} + delete baton->{{ arg.payload.name }}; + {%endif%} + {%elsif arg.globalPayload %} + delete ({{ cppFunctionName}}_globalPayload*)baton->{{ arg.name }}; + {%else%} + free((void*)baton->{{ arg.name }}); + {%endif%} + {%elsif arg.freeFunctionName|and arg.isReturn|and arg.selfFreeing %} + {{ arg.freeFunctionName }}(baton->{{ arg.name }}); + {%endif%} + {%endeach%} + + {%each args|argsInfo as 
arg %} + {%if arg.isCppClassStringOrArray %} + {%if arg.freeFunctionName %} + {%elsif not arg.isConst%} + free((void *)baton->{{ arg.name }}); + {%endif%} + {%elsif arg | isOid %} + if (baton->{{ arg.name}}NeedsFree) { + baton->{{ arg.name}}NeedsFree = false; + free((void *)baton->{{ arg.name }}); + } + {%elsif arg.isCallbackFunction %} + {%if not arg.payload.globalPayload %} + delete baton->{{ arg.payload.name }}; + {%endif%} + {%elsif arg.globalPayload %} + delete ({{ cppFunctionName}}_globalPayload*)baton->{{ arg.name }}; + {%endif%} + {%if arg.cppClassName == "GitBuf" %} + {%if cppFunctionName == "Set" %} + {%else%} + git_buf_dispose(baton->{{ arg.name }}); + free((void *)baton->{{ arg.name }}); + {%endif%} + {%endif%} + {%endeach%} + + delete baton; } void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { @@ -139,21 +264,38 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { {%elsif not .|returnsCount %} v8::Local result = Nan::Undefined(); {%else%} - v8::Local to; + v8::Local v8ConversionSlot; {%if .|returnsCount > 1 %} v8::Local result = Nan::New(); {%endif%} {%each .|returnsInfo 0 1 as _return %} {%partial convertToV8 _return %} {%if .|returnsCount > 1 %} - Nan::Set(result, Nan::New("{{ _return.returnNameOrName }}").ToLocalChecked(), to); + Nan::Set(result, Nan::New("{{ _return.returnNameOrName }}").ToLocalChecked(), v8ConversionSlot); {%endif%} {%endeach%} {%if .|returnsCount == 1 %} - v8::Local result = to; + v8::Local result = v8ConversionSlot; {%endif%} {%endif%} + {% each args|argsInfo as arg %} + {% if not arg.ignore %} + {% if arg.isStructType %} + {% if arg.preserveOnThis %} + { + {% if args|thisInfo 'isReturn' %} + auto objWrap = Nan::ObjectWrap::Unwrap<{{ args|thisInfo 'cppClassName' }}>(result.As()); + {% else %} + auto objWrap = Nan::ObjectWrap::Unwrap<{{ args|thisInfo 'cppClassName' }}>(GetFromPersistent("{{ args|thisInfo 'name' }}").As()); + {% endif %} + 
objWrap->SaveCleanupHandle(cleanupHandles["{{ arg.name }}"]); + } + {% endif %} + {% endif %} + {% endif %} + {% endeach %} + v8::Local argv[2] = { Nan::Null(), result @@ -163,12 +305,12 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { if (baton->error) { v8::Local err; if (baton->error->message) { - err = Nan::Error(baton->error->message)->ToObject(); + err = Nan::To(Nan::Error(baton->error->message)).ToLocalChecked(); } else { - err = Nan::Error("Method {{ jsFunctionName }} has thrown an error.")->ToObject(); + err = Nan::To(Nan::Error("Method {{ jsFunctionName }} has thrown an error.")).ToLocalChecked(); } - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("{{ jsClassName }}.{{ jsFunctionName }}").ToLocalChecked()); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("{{ jsClassName }}.{{ jsFunctionName }}").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -177,60 +319,22 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { free((void *)baton->error->message); free((void *)baton->error); } else if (baton->error_code < 0) { - std::queue< v8::Local > workerArguments; - {%each args|argsInfo as arg %} - {%if not arg.isReturn %} - {%if not arg.isSelf %} - {%if not arg.isCallbackFunction %} - workerArguments.push(GetFromPersistent("{{ arg.name }}")); - {%endif%} - {%endif%} - {%endif%} - {%endeach%} bool callbackFired = false; - while(!workerArguments.empty()) { - v8::Local node = workerArguments.front(); - workerArguments.pop(); - - if ( - !node->IsObject() - || node->IsArray() - || node->IsBooleanObject() - || node->IsDate() - || node->IsFunction() - || node->IsNumberObject() - || node->IsRegExp() - || node->IsStringObject() - ) { - continue; - } - - v8::Local nodeObj = node->ToObject(); - v8::Local checkValue = 
GetPrivate(nodeObj, Nan::New("NodeGitPromiseError").ToLocalChecked()); - - if (!checkValue.IsEmpty() && !checkValue->IsNull() && !checkValue->IsUndefined()) { + if (!callbackErrorHandle.IsEmpty()) { + v8::Local maybeError = Nan::New(callbackErrorHandle); + if (!maybeError->IsNull() && !maybeError->IsUndefined()) { v8::Local argv[1] = { - checkValue->ToObject() + maybeError }; callback->Call(1, argv, async_resource); callbackFired = true; - break; - } - - v8::Local properties = nodeObj->GetPropertyNames(); - for (unsigned int propIndex = 0; propIndex < properties->Length(); ++propIndex) { - v8::Local propName = properties->Get(propIndex)->ToString(); - v8::Local nodeToQueue = nodeObj->Get(propName); - if (!nodeToQueue->IsUndefined()) { - workerArguments.push(nodeToQueue); - } } } if (!callbackFired) { - v8::Local err = Nan::Error("Method {{ jsFunctionName }} has thrown an error.")->ToObject(); - err->Set(Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); - err->Set(Nan::New("errorFunction").ToLocalChecked(), Nan::New("{{ jsClassName }}.{{ jsFunctionName }}").ToLocalChecked()); + v8::Local err = Nan::To(Nan::Error("Method {{ jsFunctionName }} has thrown an error.")).ToLocalChecked(); + Nan::Set(err, Nan::New("errno").ToLocalChecked(), Nan::New(baton->error_code)); + Nan::Set(err, Nan::New("errorFunction").ToLocalChecked(), Nan::New("{{ jsClassName }}.{{ jsFunctionName }}").ToLocalChecked()); v8::Local argv[1] = { err }; @@ -257,6 +361,8 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { {%else%} free((void*)baton->{{ arg.name }}); {%endif%} + {%elsif arg.freeFunctionName|and arg.isReturn|and arg.selfFreeing %} + {{ arg.freeFunctionName }}(baton->{{ arg.name }}); {%endif%} {%endeach%} } @@ -264,7 +370,6 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { {%each args|argsInfo as arg %} {%if arg.isCppClassStringOrArray %} {%if arg.freeFunctionName %} - {{ arg.freeFunctionName }}(baton->{{ arg.name }}); 
{%elsif not arg.isConst%} free((void *)baton->{{ arg.name }}); {%endif%} @@ -283,7 +388,7 @@ void {{ cppClassName }}::{{ cppFunctionName }}Worker::HandleOKCallback() { {%if arg.cppClassName == "GitBuf" %} {%if cppFunctionName == "Set" %} {%else%} - git_buf_free(baton->{{ arg.name }}); + git_buf_dispose(baton->{{ arg.name }}); free((void *)baton->{{ arg.name }}); {%endif%} {%endif%} diff --git a/generate/templates/partials/callback_helpers.cc b/generate/templates/partials/callback_helpers.cc index 1abfaa991a..5050350e41 100644 --- a/generate/templates/partials/callback_helpers.cc +++ b/generate/templates/partials/callback_helpers.cc @@ -12,7 +12,13 @@ baton.{{ arg.name }} = {{ arg.name }}; {% endeach %} - return baton.ExecuteAsync({{ cppFunctionName }}_{{ cbFunction.name }}_async); + return baton.ExecuteAsync({{ cppFunctionName }}_{{ cbFunction.name }}_async, {{ cppFunctionName }}_{{ cbFunction.name }}_cancelAsync); +} + +void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_cancelAsync(void *untypedBaton) { + {{ cppFunctionName }}_{{ cbFunction.name|titleCase }}Baton* baton = static_cast<{{ cppFunctionName }}_{{ cbFunction.name|titleCase }}Baton*>(untypedBaton); + baton->result = {{ cbFunction.return.cancel }}; + baton->Done(); } void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_async(void *untypedBaton) { @@ -30,32 +36,27 @@ void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_async(void {% endif %} {% endeach %} - v8::Local argv[{{ cbFunction.args|jsArgsCount }}] = { - {% each cbFunction.args|argsInfo as arg %} - {% if arg | isPayload %} - {%-- payload is always the last arg --%} - // payload is null because we can use closure scope in javascript - Nan::Undefined() - {% elsif arg.isJsArg %} - {% if arg.isEnum %} - Nan::New((int)baton->{{ arg.name }}), - {% elsif arg.isLibgitType %} - {{ arg.cppClassName }}::New(baton->{{ arg.name }}, false), - {% elsif arg.cType == "size_t" %} - // HACK: NAN should really have an 
overload for Nan::New to support size_t - Nan::New((unsigned int)baton->{{ arg.name }}), - {% elsif arg.cppClassName == 'String' %} - Nan::New(baton->{{ arg.name }}).ToLocalChecked(), - {% else %} - Nan::New(baton->{{ arg.name }}), - {% endif %} + v8::Local argv[{{ cbFunction.args|callbackArgsCount }}] = { + {% each cbFunction.args|callbackArgsInfo as arg %} + {% if not arg.firstArg %}, {% endif %} + {% if arg.isEnum %} + Nan::New((int)baton->{{ arg.name }}) + {% elsif arg.isLibgitType %} + {{ arg.cppClassName }}::New(baton->{{ arg.name }}, false) + {% elsif arg.cType == "size_t" %} + // HACK: NAN should really have an overload for Nan::New to support size_t + Nan::New((unsigned int)baton->{{ arg.name }}) + {% elsif arg.cppClassName == 'String' %} + Nan::New(baton->{{ arg.name }}).ToLocalChecked() + {% else %} + Nan::New(baton->{{ arg.name }}) {% endif %} {% endeach %} }; Nan::TryCatch tryCatch; - // TODO This should take an async_resource, but we will need to figure out how to pipe the correct context into this - Nan::MaybeLocal maybeResult = Nan::Call(*callback, {{ cbFunction.args|jsArgsCount }}, argv); + Nan::MaybeLocal maybeResult = (*callback)(baton->GetAsyncResource(), {{ cbFunction.args|callbackArgsCount }}, argv); + v8::Local result; if (!maybeResult.IsEmpty()) { result = maybeResult.ToLocalChecked(); @@ -71,7 +72,7 @@ void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_async(void } else if (!result->IsNull() && !result->IsUndefined()) { {% if _return.isOutParam %} - {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(result->ToObject()); + {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(Nan::To(result).ToLocalChecked()); wrapper->selfFreeing = false; *baton->{{ _return.name }} = wrapper->GetValue(); @@ -93,7 +94,7 @@ void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_async(void baton->Done(); } -void {{ cppClassName }}::{{ cppFunctionName 
}}_{{ cbFunction.name }}_promiseCompleted(bool isFulfilled, AsyncBaton *_baton, v8::Local result) { +void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_promiseCompleted(bool isFulfilled, nodegit::AsyncBaton *_baton, v8::Local result) { Nan::HandleScope scope; {{ cppFunctionName }}_{{ cbFunction.name|titleCase }}Baton* baton = static_cast<{{ cppFunctionName }}_{{ cbFunction.name|titleCase }}Baton*>(_baton); @@ -105,7 +106,7 @@ void {{ cppClassName }}::{{ cppFunctionName }}_{{ cbFunction.name }}_promiseComp } else if (!result->IsNull() && !result->IsUndefined()) { {% if _return.isOutParam %} - {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(result->ToObject()); + {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(Nan::To(result).ToLocalChecked()); wrapper->selfFreeing = false; *baton->{{ _return.name }} = wrapper->GetValue(); diff --git a/generate/templates/partials/configurable_callbacks.cc b/generate/templates/partials/configurable_callbacks.cc new file mode 100644 index 0000000000..79eef554b8 --- /dev/null +++ b/generate/templates/partials/configurable_callbacks.cc @@ -0,0 +1,227 @@ +{% each fields|fieldsInfo as field %} + {% if not field.ignore %} + {% if field.isCallbackFunction %} + Configurable{{ cppClassName }}* Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_getInstanceFromBaton({{ field.name|titleCase }}Baton* baton) { + {% if isExtendedStruct %} + return static_cast((({{cType}}_extended *)baton->self)->payload); + {% else %} + return static_cast(baton-> + {% each field.args|argsInfo as arg %} + {% if arg.payload == true %} + {{arg.name}} + {% elsif arg.lastArg %} + {{arg.name}} + {% endif %} + {% endeach %}); + {% endif %} + } + + {{ field.return.type }} Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_cppCallback ( + {% each field.args|argsInfo as arg %} + {{ arg.cType }} {{ arg.name}}{% if not arg.lastArg %},{% endif %} + {% endeach %} 
+ ) { + {{ field.name|titleCase }}Baton *baton = + new {{ field.name|titleCase }}Baton({{ field.return.noResults }}); + + {% each field.args|argsInfo as arg %} + baton->{{ arg.name }} = {{ arg.name }}; + {% endeach %} + + Configurable{{ cppClassName }}* instance = {{ field.jsFunctionName }}_getInstanceFromBaton(baton); + + {% if field.return.type == "void" %} + if (instance->nodegitContext != nodegit::ThreadPool::GetCurrentContext()) { + delete baton; + } else if (instance->{{ field.jsFunctionName }}.WillBeThrottled()) { + delete baton; + } else if (instance->{{ field.jsFunctionName }}.ShouldWaitForResult()) { + baton->ExecuteAsync({{ field.jsFunctionName }}_async, {{ field.jsFunctionName }}_cancelAsync); + delete baton; + } else { + baton->ExecuteAsync({{ field.jsFunctionName }}_async, {{ field.jsFunctionName }}_cancelAsync, nodegit::deleteBaton); + } + return; + {% else %} + {{ field.return.type }} result; + + if (instance->nodegitContext != nodegit::ThreadPool::GetCurrentContext()) { + result = baton->defaultResult; + delete baton; + } else if (instance->{{ field.jsFunctionName }}.WillBeThrottled()) { + result = baton->defaultResult; + delete baton; + } else if (instance->{{ field.jsFunctionName }}.ShouldWaitForResult()) { + result = baton->ExecuteAsync({{ field.jsFunctionName }}_async, {{ field.jsFunctionName }}_cancelAsync); + delete baton; + } else { + result = baton->defaultResult; + baton->ExecuteAsync({{ field.jsFunctionName }}_async, {{ field.jsFunctionName }}_cancelAsync, nodegit::deleteBaton); + } + return result; + {% endif %} + } + + void Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_cancelAsync(void *untypedBaton) { + {{ field.name|titleCase }}Baton* baton = static_cast<{{ field.name|titleCase }}Baton*>(untypedBaton); + {% if field.return.type != "void" %} + baton->result = {{ field.return.cancel }}; + {% endif %} + baton->Done(); + } + + void Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_async(void *untypedBaton) { + 
Nan::HandleScope scope; + + {{ field.name|titleCase }}Baton* baton = static_cast<{{ field.name|titleCase }}Baton*>(untypedBaton); + Configurable{{ cppClassName }}* instance = {{ field.jsFunctionName }}_getInstanceFromBaton(baton); + + if (instance->{{ field.jsFunctionName }}.GetCallback()->IsEmpty()) { + {% if field.return.type == "int" %} + baton->result = baton->defaultResult; // no results acquired + {% endif %} + baton->Done(); + return; + } + + {% each field.args|callbackArgsInfo as arg %} + {% if arg.cppClassName == "Array" %} + v8::Local _{{arg.name}}_array = Nan::New(baton->{{ arg.arrayLengthArgumentName }}); + for(uint32_t i = 0; i < _{{arg.name}}_array->Length(); i++) { + Nan::Set(_{{arg.name}}_array, i, {{arg.arrayElementCppClassName}}::New(baton->{{arg.name}}[i], false)); + } + {% endif %} + {% endeach %} + + {% if field.args|callbackArgsCount == 0 %} + v8::Local *argv = NULL; + {% else %} + v8::Local argv[{{ field.args|callbackArgsCount }}] = { + {% each field.args|callbackArgsInfo as arg %} + {% if not arg.firstArg %},{% endif %} + {% if arg.isEnum %} + Nan::New((int)baton->{{ arg.name }}) + {% elsif arg.cppClassName == "Array" %} + _{{arg.name}}_array + {% elsif arg.isLibgitType %} + {{ arg.cppClassName }}::New(baton->{{ arg.name }}, false) + {% elsif arg.cType == "size_t" %} + // HACK: NAN should really have an overload for Nan::New to support size_t + Nan::New((unsigned int)baton->{{ arg.name }}) + {% elsif arg.cppClassName == "String" %} + baton->{{ arg.name }} == NULL + ? 
Nan::EmptyString() + : Nan::New({%if arg.cType | isDoublePointer %}*{% endif %}baton->{{ arg.name }}).ToLocalChecked() + {% else %} + Nan::New(baton->{{ arg.name }}) + {% endif %} + {% endeach %} + }; + {% endif %} + + Nan::TryCatch tryCatch; + + Nan::MaybeLocal maybeResult = (*(instance->{{ field.jsFunctionName }}.GetCallback()))( + baton->GetAsyncResource(), + {{ field.args|callbackArgsCount }}, + argv + ); + v8::Local result; + if (!maybeResult.IsEmpty()) { + result = maybeResult.ToLocalChecked(); + } + + if (PromiseCompletion::ForwardIfPromise(result, baton, Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_promiseCompleted)) { + return; + } + + {% if field.return.type == "void" %} + baton->Done(); + {% else %} + {% each field|returnsInfo false true as _return %} + if (result.IsEmpty() || result->IsNativeError()) { + baton->result = {{ field.return.error }}; + } + else if (!result->IsNull() && !result->IsUndefined()) { + {% if _return.isOutParam %} + {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(Nan::To(result).ToLocalChecked()); + wrapper->selfFreeing = false; + + {% if _return.cppClassName == "GitOid" %} + git_oid_cpy(baton->{{ _return.name }}, wrapper->GetValue()); + {% else %} + *baton->{{ _return.name }} = wrapper->GetValue(); + {% endif %} + baton->result = {{ field.return.success }}; + {% else %} + if (result->IsNumber()) { + baton->result = Nan::To(result).FromJust(); + } + else { + baton->result = baton->defaultResult; + } + {% endif %} + } + else { + baton->result = baton->defaultResult; + } + {% endeach %} + baton->Done(); + {% endif %} + } + + void Configurable{{ cppClassName }}::{{ field.jsFunctionName }}_promiseCompleted(bool isFulfilled, nodegit::AsyncBaton *_baton, v8::Local result) { + Nan::HandleScope scope; + + {{ field.name|titleCase }}Baton* baton = static_cast<{{ field.name|titleCase }}Baton*>(_baton); + {% if field.return.type == "void" %} + baton->Done(); + {% else %} + if 
(isFulfilled) { + {% each field|returnsInfo false true as _return %} + if (result.IsEmpty() || result->IsNativeError()) { + baton->result = {{ field.return.error }}; + } + else if (!result->IsNull() && !result->IsUndefined()) { + {% if _return.isOutParam %} + {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(Nan::To(result).ToLocalChecked()); + wrapper->selfFreeing = false; + + {% if _return.cppClassName == "GitOid" %} + git_oid_cpy(baton->{{ _return.name }}, wrapper->GetValue()); + {% else %} + *baton->{{ _return.name }} = wrapper->GetValue(); + {% endif %} + baton->result = {{ field.return.success }}; + {% else %} + if (result->IsNumber()) { + baton->result = Nan::To(result).FromJust(); + } + else { + baton->result = baton->defaultResult; + } + {% endif %} + } + else { + baton->result = baton->defaultResult; + } + {% endeach %} + } + else { + // promise was rejected + {% if isExtendedStruct %} + Configurable{{ cppClassName }}* instance = static_cast((({{cType}}_extended *)baton->self)->payload); + {% else %} + Configurable{{ cppClassName }}* instance = static_cast(baton->{% each field.args|argsInfo as arg %} + {% if arg.payload == true %}{{arg.name}}{% elsif arg.lastArg %}{{arg.name}}{% endif %} + {% endeach %}); + {% endif %} + baton->SetCallbackError(result); + baton->result = {{ field.return.error }}; + } + baton->Done(); + {% endif %} + } + {% endif %} + {% endif %} +{% endeach %} diff --git a/generate/templates/partials/convert_from_v8.cc b/generate/templates/partials/convert_from_v8.cc index 7153b2a9aa..f33eddd0f6 100644 --- a/generate/templates/partials/convert_from_v8.cc +++ b/generate/templates/partials/convert_from_v8.cc @@ -14,12 +14,12 @@ {% elsif cppClassName == 'GitBuf' %} {%-- Print nothing --%} {%else%} - if (info[{{ jsArg }}]->Is{{ cppClassName|cppToV8 }}()) { + if ((info.Length() - 1) > {{ jsArg }} && info[{{ jsArg }}]->Is{{ cppClassName|cppToV8 }}()) { {%endif%} {%endif%} {%if cppClassName == 'String'%} - 
String::Utf8Value {{ name }}(info[{{ jsArg }}]->ToString()); + Nan::Utf8String {{ name }}(Nan::To(info[{{ jsArg }}]).ToLocalChecked()); // malloc with one extra byte so we can add the terminating null character C-strings expect: from_{{ name }} = ({{ cType }}) malloc({{ name }}.length() + 1); // copy the characters from the nodejs string into our C-string (used instead of strdup or strcpy because nulls in @@ -36,7 +36,7 @@ from_{{ name }} = GitBufConverter::Convert(info[{{ jsArg }}]); {%elsif cppClassName == 'Wrapper'%} - String::Utf8Value {{ name }}(info[{{ jsArg }}]->ToString()); + Nan::Utf8String {{ name }}(Nan::To(info[{{ jsArg }}]).ToLocalChecked()); // malloc with one extra byte so we can add the terminating null character C-strings expect: from_{{ name }} = ({{ cType }}) malloc({{ name }}.length() + 1); // copy the characters from the nodejs string into our C-string (used instead of strdup or strcpy because nulls in @@ -47,18 +47,33 @@ memset((void *)(((char *)from_{{ name }}) + {{ name }}.length()), 0, 1); {%elsif cppClassName == 'Array'%} - Array *tmp_{{ name }} = Array::Cast(*info[{{ jsArg }}]); - from_{{ name }} = ({{ cType }})malloc(tmp_{{ name }}->Length() * sizeof({{ cType|replace '**' '*' }})); - for (unsigned int i = 0; i < tmp_{{ name }}->Length(); i++) { + v8::Local tmp_{{ name }} = v8::Local::Cast(info[{{ jsArg }}]); + from_{{ name }} = ({{ cType }})malloc(tmp_{{ name }}->Length() * sizeof({{ cType|unPointer }})); + for (unsigned int i = 0; i < tmp_{{ name }}->Length(); i++) { {%-- // FIXME: should recursively call convertFromv8. 
--%} - from_{{ name }}[i] = Nan::ObjectWrap::Unwrap<{{ arrayElementCppClassName }}>(tmp_{{ name }}->Get(Nan::New(static_cast(i)))->ToObject())->GetValue(); + const v8::Local arrayVal = Nan::Get(tmp_{{ name }},i).ToLocalChecked(); + {%if arrayElementCppClassName == 'GitOid'%} + if (arrayVal->IsString()) { + // Try and parse in a string to a git_oid + Nan::Utf8String oidString(Nan::To(arrayVal).ToLocalChecked()); + + if (git_oid_fromstr(&from_{{ name }}[i], (const char *) strdup(*oidString)) != GIT_OK) { + return Nan::ThrowError(git_error_last()->message); + } + } + else { + git_oid_cpy(&from_{{ name }}[i], Nan::ObjectWrap::Unwrap(Nan::To(arrayVal).ToLocalChecked())->GetValue()); } + {%else%} + from_{{ name }}[i] = Nan::ObjectWrap::Unwrap<{{ arrayElementCppClassName }}>(Nan::To(arrayVal).ToLocalChecked())->GetValue(); + {%endif%} + } {%elsif cppClassName == 'Function'%} {%elsif cppClassName == 'Buffer'%} - from_{{ name }} = Buffer::Data(info[{{ jsArg }}]->ToObject()); + from_{{ name }} = Buffer::Data(Nan::To(info[{{ jsArg }}]).ToLocalChecked()); {%elsif cppClassName|isV8Value %} {%if cType|isPointer %} @@ -69,14 +84,14 @@ {%elsif cppClassName == 'GitOid'%} if (info[{{ jsArg }}]->IsString()) { // Try and parse in a string to a git_oid - String::Utf8Value oidString(info[{{ jsArg }}]->ToString()); + Nan::Utf8String oidString(Nan::To(info[{{ jsArg }}]).ToLocalChecked()); git_oid *oidOut = (git_oid *)malloc(sizeof(git_oid)); if (git_oid_fromstr(oidOut, (const char *) strdup(*oidString)) != GIT_OK) { free(oidOut); - if (giterr_last()) { - return Nan::ThrowError(giterr_last()->message); + if (git_error_last()->klass != GIT_ERROR_NONE) { + return Nan::ThrowError(git_error_last()->message); } else { return Nan::ThrowError("Unknown Error"); } @@ -89,10 +104,10 @@ {%endif%} } else { - {%if cType|isDoublePointer %}*{%endif%}from_{{ name }} = Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info[{{ jsArg }}]->ToObject())->GetValue(); + {%if cType|isDoublePointer %}*{%endif%}from_{{ 
name }} = Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(Nan::To(info[{{ jsArg }}]).ToLocalChecked())->GetValue(); } {%else%} - {%if cType|isDoublePointer %}*{%endif%}from_{{ name }} = Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info[{{ jsArg }}]->ToObject())->GetValue(); + {%if cType|isDoublePointer %}*{%endif%}from_{{ name }} = Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(Nan::To(info[{{ jsArg }}]).ToLocalChecked())->GetValue(); {%endif%} {%if isBoolean %} diff --git a/generate/templates/partials/convert_to_v8.cc b/generate/templates/partials/convert_to_v8.cc index 908f990182..ccba9b330f 100644 --- a/generate/templates/partials/convert_to_v8.cc +++ b/generate/templates/partials/convert_to_v8.cc @@ -2,15 +2,18 @@ {% if cppClassName == 'String' %} if ({{= parsedName =}}){ {% if size %} - to = Nan::New({{= parsedName =}}, {{ size }}).ToLocalChecked(); + v8ConversionSlot = Nan::New({{= parsedName =}}, {{ size }}).ToLocalChecked(); {% elsif cType == 'char **' %} - to = Nan::New(*{{= parsedName =}}).ToLocalChecked(); + v8ConversionSlot = Nan::New(*{{= parsedName =}}).ToLocalChecked(); + {% elsif cType == 'char' %} + char convertToNullTerminated[2] = { {{= parsedName =}}, '\0' }; + v8ConversionSlot = Nan::New(convertToNullTerminated).ToLocalChecked(); {% else %} - to = Nan::New({{= parsedName =}}).ToLocalChecked(); + v8ConversionSlot = Nan::New({{= parsedName =}}).ToLocalChecked(); {% endif %} } else { - to = Nan::Null(); + v8ConversionSlot = Nan::Null(); } {% if freeFunctionName %} @@ -18,16 +21,27 @@ {% endif %} {% elsif cppClassName|isV8Value %} - - {% if isCppClassIntType %} - to = Nan::New<{{ cppClassName }}>(({{ parsedClassName }}){{= parsedName =}}); + {% if cType|isArrayType %} + v8::Local tmpArray = Nan::New({{ cType|toSizeOfArray }}); + for (unsigned int i = 0; i < {{ cType|toSizeOfArray }}; i++) { + v8::Local element; + {% if isCppClassIntType %} + element = Nan::New<{{ cppClassName }}>(({{ parsedClassName }}){{= parsedName =}}[i]); + {% else %} + element = 
Nan::New<{{ cppClassName }}>({% if needsDereference %}*{% endif %}{{= parsedName =}}[i]); + {% endif %} + Nan::Set(tmpArray, Nan::New(i), element); + } + v8ConversionSlot = tmpArray; + {% elsif isCppClassIntType %} + v8ConversionSlot = Nan::New<{{ cppClassName }}>(({{ parsedClassName }}){{= parsedName =}}); {% else %} - to = Nan::New<{{ cppClassName }}>({% if needsDereference %}*{% endif %}{{= parsedName =}}); + v8ConversionSlot = Nan::New<{{ cppClassName }}>({% if needsDereference %}*{% endif %}{{= parsedName =}}); {% endif %} {% elsif cppClassName == 'External' %} - to = Nan::New((void *){{= parsedName =}}); + v8ConversionSlot = Nan::New((void *){{= parsedName =}}); {% elsif cppClassName == 'Array' %} @@ -35,66 +49,77 @@ {% if size %} v8::Local tmpArray = Nan::New({{= parsedName =}}->{{ size }}); for (unsigned int i = 0; i < {{= parsedName =}}->{{ size }}; i++) { - Nan::Set(tmpArray, Nan::New(i), Nan::New({{= parsedName =}}->{{ key }}[i]).ToLocalChecked()); + v8::Local element; + {% if arrayElementCppClassName %} + element = {{ arrayElementCppClassName }}::New( + {{ cType|asElementPointer parsedName }}->{{ key }}[i], + {{ selfFreeing|toBool }} + {% if hasOwner %} + , owners + {% endif %} + ); + {% else %} + element = Nan::New({{= parsedName =}}->{{ key }}[i]).ToLocalChecked(); + {% endif %} + Nan::Set(tmpArray, Nan::New(i), element); } {% else %} v8::Local tmpArray = Nan::New({{= parsedName =}}); {% endif %} - to = tmpArray; + v8ConversionSlot = tmpArray; {% elsif cppClassName == 'GitBuf' %} {% if doNotConvert %} - to = Nan::Null(); + v8ConversionSlot = Nan::Null(); {% else %} if ({{= parsedName =}}) { - to = Nan::New({{= parsedName =}}->ptr, {{= parsedName = }}->size).ToLocalChecked(); + v8ConversionSlot = Nan::New({{= parsedName =}}->ptr, {{= parsedName = }}->size).ToLocalChecked(); } else { - to = Nan::Null(); + v8ConversionSlot = Nan::Null(); } {% endif %} {% else %} - {% if copy %} - if ({{= parsedName =}} != NULL) { - {{= parsedName =}} = ({{ cType|replace 
'**' '*' }} {% if not cType|isPointer %}*{% endif %}){{ copy }}({{= parsedName =}}); - } + {% if cType|isArrayType %} + v8::Local tmpArray = Nan::New({{ cType|toSizeOfArray }}); + for (unsigned int i = 0; i < {{ cType|toSizeOfArray }}; i++) { {% endif %} - - if ({{= parsedName =}} != NULL) { + if ({{ cType|asElementPointer parsedName }} != NULL) { {% if hasOwner %} v8::Local owners = Nan::New(0); {% if ownedBy %} {% if isAsync %} {% each ownedBy as owner %} - Nan::Set(owners, Nan::New(owners->Length()), this->GetFromPersistent("{{= owner =}}")->ToObject()); + {%-- If the owner of this object is "this" in an async method, it will be stored in the persistent handle by name. --%} + Nan::Set(owners, Nan::New(owners->Length()), Nan::To(this->GetFromPersistent("{{= owner =}}")).ToLocalChecked()); {% endeach %} {% else %} {% each ownedByIndices as ownedByIndex %} - Nan::Set(owners, Nan::New(owners->Length()), info[{{= ownedByIndex =}}]->ToObject()); + Nan::Set(owners, Nan::New(owners->Length()), Nan::To(info[{{= ownedByIndex =}}]).ToLocalChecked()); {% endeach %} {% endif %} {% endif %} {%if isAsync %} {% elsif ownedByThis %} + {%-- If the owner of this object is "this", it will be retrievable from the info object in a sync method. 
--%} Nan::Set(owners, owners->Length(), info.This()); {% endif %} {% if ownerFn | toBool %} Nan::Set( owners, Nan::New(owners->Length()), - {{= ownerFn.singletonCppClassName =}}::New( - {{= ownerFn.name =}}({{= parsedName =}}), + Nan::To({{= ownerFn.singletonCppClassName =}}::New( + {{= ownerFn.name =}}({{ cType|asElementPointer parsedName }}), true - )->ToObject() + )).ToLocalChecked() ); {% endif %} {% endif %} - // {{= cppClassName }} {{= parsedName }} {% if cppClassName == 'Wrapper' %} - to = {{ cppClassName }}::New({{= parsedName =}}); + v8ConversionSlot = {{ cppClassName }}::New({{ cType|asElementPointer parsedName }}); {% else %} - to = {{ cppClassName }}::New( - {{= parsedName =}}, + v8ConversionSlot = {{ cppClassName }}::New( + {{ cType|asElementPointer parsedName }}, {{ selfFreeing|toBool }} {% if hasOwner %} , owners @@ -103,8 +128,12 @@ {% endif %} } else { - to = Nan::Null(); + v8ConversionSlot = Nan::Null(); } - + {% if cType|isArrayType %} + Nan::Set(tmpArray, Nan::New(i), v8ConversionSlot); + } + v8ConversionSlot = tmpArray; + {% endif %} {% endif %} // end convert_to_v8 block diff --git a/generate/templates/partials/field_accessors.cc b/generate/templates/partials/field_accessors.cc index 6325587d33..bcd5e5f870 100644 --- a/generate/templates/partials/field_accessors.cc +++ b/generate/templates/partials/field_accessors.cc @@ -7,16 +7,9 @@ {% if field.isEnum %} info.GetReturnValue().Set(Nan::New((int)wrapper->GetValue()->{{ field.name }})); - {% elsif field.isLibgitType | or field.payloadFor %} + {% elsif field.isLibgitType %} info.GetReturnValue().Set(Nan::New(wrapper->{{ field.name }})); - {% elsif field.isCallbackFunction %} - if (wrapper->{{field.name}}.HasCallback()) { - info.GetReturnValue().Set(wrapper->{{ field.name }}.GetCallback()->GetFunction()); - } else { - info.GetReturnValue().SetUndefined(); - } - {% elsif field.cppClassName == 'String' %} if (wrapper->GetValue()->{{ field.name }}) { 
info.GetReturnValue().Set(Nan::New(wrapper->GetValue()->{{ field.name }}).ToLocalChecked()); @@ -39,60 +32,32 @@ } {% elsif field.isLibgitType %} - v8::Local {{ field.name }}(value->ToObject()); + v8::Local {{ field.name }}(Nan::To(value).ToLocalChecked()); wrapper->{{ field.name }}.Reset({{ field.name }}); - wrapper->raw->{{ field.name }} = {% if not field.cType | isPointer %}*{% endif %}{% if field.cppClassName == 'GitStrarray' %}StrArrayConverter::Convert({{ field.name }}->ToObject()){% else %}Nan::ObjectWrap::Unwrap<{{ field.cppClassName }}>({{ field.name }}->ToObject())->GetValue(){% endif %}; - - {% elsif field.isCallbackFunction %} - Nan::Callback *callback = NULL; - int throttle = {%if field.return.throttle %}{{ field.return.throttle }}{%else%}0{%endif%}; - bool waitForResult = true; - - if (value->IsFunction()) { - callback = new Nan::Callback(value.As()); - } else if (value->IsObject()) { - v8::Local object = value.As(); - v8::Local callbackKey; - Nan::MaybeLocal maybeObjectCallback = Nan::Get(object, Nan::New("callback").ToLocalChecked()); - if (!maybeObjectCallback.IsEmpty()) { - v8::Local objectCallback = maybeObjectCallback.ToLocalChecked(); - if (objectCallback->IsFunction()) { - callback = new Nan::Callback(objectCallback.As()); - - Nan::MaybeLocal maybeObjectThrottle = Nan::Get(object, Nan::New("throttle").ToLocalChecked()); - if(!maybeObjectThrottle.IsEmpty()) { - v8::Local objectThrottle = maybeObjectThrottle.ToLocalChecked(); - if (objectThrottle->IsNumber()) { - throttle = (int)objectThrottle.As()->Value(); - } - } - - Nan::MaybeLocal maybeObjectWaitForResult = Nan::Get(object, Nan::New("waitForResult").ToLocalChecked()); - if(!maybeObjectWaitForResult.IsEmpty()) { - Local objectWaitForResult = maybeObjectWaitForResult.ToLocalChecked(); - waitForResult = (bool)objectWaitForResult->BooleanValue(); - } + {% if field.cppClassName == 'GitStrarray' %} + wrapper->raw->{{ field.name }} = {% if not field.cType | isPointer %}*{% endif 
%}StrArrayConverter::Convert({{ field.name }}); + {% else %} + auto wrappedObject = Nan::ObjectWrap::Unwrap<{{ field.cppClassName }}>({{ field.name }}); + wrapper->raw->{{ field.name }} = {% if not field.cType | isPointer %}*{% endif %}wrappedObject->GetValue(); + {%-- We are assuming that users are responsible enough to not replace fields on their structs mid-operation, and would rather build out code to prevent that than be smarter here --%} + wrapper->AddReferenceCallbacks( + {{ field.index }}, + [wrappedObject]() { + wrappedObject->Reference(); + }, + [wrappedObject]() { + wrappedObject->Unreference(); } - } - } - if (callback) { - if (!wrapper->raw->{{ field.name }}) { - wrapper->raw->{{ field.name }} = ({{ field.cType }}){{ field.name }}_cppCallback; - } - - wrapper->{{ field.name }}.SetCallback(callback, throttle, waitForResult); - } - - {% elsif field.payloadFor %} - wrapper->{{ field.name }}.Reset(value); + ); + {% endif %} {% elsif field.cppClassName == 'String' %} if (wrapper->GetValue()->{{ field.name }}) { } - String::Utf8Value str(value); + Nan::Utf8String str(value); wrapper->GetValue()->{{ field.name }} = strdup(*str); {% elsif field.isCppClassIntType %} @@ -106,255 +71,5 @@ } {% endif %} } - - {% if field.isCallbackFunction %} - {{ cppClassName }}* {{ cppClassName }}::{{ field.name }}_getInstanceFromBaton({{ field.name|titleCase }}Baton* baton) { - {% if isExtendedStruct %} - return static_cast<{{ cppClassName }}*>((({{cType}}_extended *)baton->self)->payload); - {% else %} - return static_cast<{{ cppClassName }}*>(baton-> - {% each field.args|argsInfo as arg %} - {% if arg.payload == true %} - {{arg.name}} - {% elsif arg.lastArg %} - {{arg.name}} - {% endif %} - {% endeach %}); - {% endif %} - } - - {{ field.return.type }} {{ cppClassName }}::{{ field.name }}_cppCallback ( - {% each field.args|argsInfo as arg %} - {{ arg.cType }} {{ arg.name}}{% if not arg.lastArg %},{% endif %} - {% endeach %} - ) { - {{ field.name|titleCase }}Baton *baton = - 
new {{ field.name|titleCase }}Baton({{ field.return.noResults }}); - - {% each field.args|argsInfo as arg %} - baton->{{ arg.name }} = {{ arg.name }}; - {% endeach %} - - {{ cppClassName }}* instance = {{ field.name }}_getInstanceFromBaton(baton); - - {% if field.return.type == "void" %} - if (instance->{{ field.name }}.WillBeThrottled()) { - delete baton; - } else if (instance->{{ field.name }}.ShouldWaitForResult()) { - baton->ExecuteAsync({{ field.name }}_async); - delete baton; - } else { - baton->ExecuteAsync({{ field.name }}_async, deleteBaton); - } - return; - {% else %} - {{ field.return.type }} result; - - if (instance->{{ field.name }}.WillBeThrottled()) { - result = baton->defaultResult; - delete baton; - } else if (instance->{{ field.name }}.ShouldWaitForResult()) { - result = baton->ExecuteAsync({{ field.name }}_async); - delete baton; - } else { - result = baton->defaultResult; - baton->ExecuteAsync({{ field.name }}_async, deleteBaton); - } - return result; - {% endif %} - } - - - void {{ cppClassName }}::{{ field.name }}_async(void *untypedBaton) { - Nan::HandleScope scope; - - {{ field.name|titleCase }}Baton* baton = static_cast<{{ field.name|titleCase }}Baton*>(untypedBaton); - {{ cppClassName }}* instance = {{ field.name }}_getInstanceFromBaton(baton); - - if (instance->{{ field.name }}.GetCallback()->IsEmpty()) { - {% if field.return.type == "int" %} - baton->result = baton->defaultResult; // no results acquired - {% endif %} - baton->Done(); - return; - } - - {% each field.args|argsInfo as arg %} - {% if arg.name == "payload" %} - {%-- Do nothing --%} - {% elsif arg.isJsArg %} - {% if arg.cType == "const char *" %} - if (baton->{{ arg.name }} == NULL) { - baton->{{ arg.name }} = ""; - } - {% elsif arg.cppClassName == "String" %} - v8::Local src; - if (baton->{{ arg.name }} == NULL) { - src = Nan::Null(); - } - else { - src = Nan::New(*baton->{{ arg.name }}).ToLocalChecked(); - } - {% endif %} - {% endif %} - {% endeach %} - - {% if 
field.isSelfReferential %} - {% if field.args|jsArgsCount|subtract 2| setUnsigned == 0 %} - v8::Local *argv = NULL; - {% else %} - v8::Local argv[{{ field.args|jsArgsCount|subtract 2| setUnsigned }}] = { - {% endif %} - {% else %} - v8::Local argv[{{ field.args|jsArgsCount }}] = { - {% endif %} - {% each field.args|argsInfo as arg %} - {% if field.isSelfReferential %} - {% if not arg.firstArg %} - {% if field.args|jsArgsCount|subtract 1|or 0 %} - {% if arg.cppClassName == "String" %} - {%-- src is always the last arg --%} - src - {% elsif arg.isJsArg %} - {% if arg.isEnum %} - Nan::New((int)baton->{{ arg.name }}), - {% elsif arg.isLibgitType %} - {{ arg.cppClassName }}::New(baton->{{ arg.name }}, false), - {% elsif arg.cType == "size_t" %} - Nan::New((unsigned int)baton->{{ arg.name }}), - {% elsif arg.name == "payload" %} - {%-- skip, filters should not have a payload --%} - {% else %} - Nan::New(baton->{{ arg.name }}), - {% endif %} - {% endif %} - {% endif %} - {% endif %} - {% else %} - {% if arg.name == "payload" %} - {%-- payload is always the last arg --%} - Nan::New(instance->{{ fields|payloadFor field.name }}) - {% elsif arg.isJsArg %} - {% if arg.isEnum %} - Nan::New((int)baton->{{ arg.name }}), - {% elsif arg.isLibgitType %} - {{ arg.cppClassName }}::New(baton->{{ arg.name }}, false), - {% elsif arg.cType == "size_t" %} - // HACK: NAN should really have an overload for Nan::New to support size_t - Nan::New((unsigned int)baton->{{ arg.name }}), - {% elsif arg.cppClassName == "String" %} - Nan::New(baton->{{ arg.name }}).ToLocalChecked(), - {% else %} - Nan::New(baton->{{ arg.name }}), - {% endif %} - {% endif %} - {% endif %} - {% endeach %} - {% if not field.isSelfReferential %} - }; - {% elsif field.args|jsArgsCount|subtract 2| setUnsigned > 0 %} - }; - {% endif %} - - Nan::TryCatch tryCatch; - - // TODO This should take an async_resource, but we will need to figure out how to pipe the correct context into this - {% if field.isSelfReferential %} - 
Nan::MaybeLocal maybeResult = Nan::Call(*(instance->{{ field.name }}.GetCallback()), {{ field.args|jsArgsCount|subtract 2| setUnsigned }}, argv); - {% else %} - Nan::MaybeLocal maybeResult = Nan::Call(*(instance->{{ field.name }}.GetCallback()), {{ field.args|jsArgsCount }}, argv); - {% endif %} - - v8::Local result; - if (!maybeResult.IsEmpty()) { - result = maybeResult.ToLocalChecked(); - } - - if(PromiseCompletion::ForwardIfPromise(result, baton, {{ cppClassName }}::{{ field.name }}_promiseCompleted)) { - return; - } - - {% if field.return.type == "void" %} - baton->Done(); - {% else %} - {% each field|returnsInfo false true as _return %} - if (result.IsEmpty() || result->IsNativeError()) { - baton->result = {{ field.return.error }}; - } - else if (!result->IsNull() && !result->IsUndefined()) { - {% if _return.isOutParam %} - {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ _return.cppClassName }}>(result->ToObject()); - wrapper->selfFreeing = false; - - *baton->{{ _return.name }} = wrapper->GetValue(); - baton->result = {{ field.return.success }}; - {% else %} - if (result->IsNumber()) { - baton->result = Nan::To(result).FromJust(); - } - else { - baton->result = baton->defaultResult; - } - {% endif %} - } - else { - baton->result = baton->defaultResult; - } - {% endeach %} - baton->Done(); - {% endif %} - } - - void {{ cppClassName }}::{{ field.name }}_promiseCompleted(bool isFulfilled, AsyncBaton *_baton, v8::Local result) { - Nan::HandleScope scope; - - {{ field.name|titleCase }}Baton* baton = static_cast<{{ field.name|titleCase }}Baton*>(_baton); - {% if field.return.type == "void" %} - baton->Done(); - {% else %} - if (isFulfilled) { - {% each field|returnsInfo false true as _return %} - if (result.IsEmpty() || result->IsNativeError()) { - baton->result = {{ field.return.error }}; - } - else if (!result->IsNull() && !result->IsUndefined()) { - {% if _return.isOutParam %} - {{ _return.cppClassName }}* wrapper = Nan::ObjectWrap::Unwrap<{{ 
_return.cppClassName }}>(result->ToObject()); - wrapper->selfFreeing = false; - - *baton->{{ _return.name }} = wrapper->GetValue(); - baton->result = {{ field.return.success }}; - {% else %} - if (result->IsNumber()) { - baton->result = Nan::To(result).FromJust(); - } - else{ - baton->result = baton->defaultResult; - } - {% endif %} - } - else { - baton->result = baton->defaultResult; - } - {% endeach %} - } - else { - // promise was rejected - {% if isExtendedStruct %} - {{ cppClassName }}* instance = static_cast<{{ cppClassName }}*>((({{cType}}_extended *)baton->self)->payload); - {% else %} - {{ cppClassName }}* instance = static_cast<{{ cppClassName }}*>(baton->{% each field.args|argsInfo as arg %} - {% if arg.payload == true %}{{arg.name}}{% elsif arg.lastArg %}{{arg.name}}{% endif %} - {% endeach %}); - {% endif %} - v8::Local parent = instance->handle(); - SetPrivate(parent, Nan::New("NodeGitPromiseError").ToLocalChecked(), result); - - baton->result = {{ field.return.error }}; - } - baton->Done(); - {% endif %} - } - {% endif %} {% endif %} {% endeach %} diff --git a/generate/templates/partials/fields.cc b/generate/templates/partials/fields.cc index 9d6e6e39d1..d9478549e5 100644 --- a/generate/templates/partials/fields.cc +++ b/generate/templates/partials/fields.cc @@ -1,28 +1,32 @@ {% each fields|fieldsInfo as field %} {% if not field.ignore %} + // start field block NAN_METHOD({{ cppClassName }}::{{ field.cppFunctionName }}) { - v8::Local to; + v8::Local v8ConversionSlot; {% if field | isFixedLengthString %} char* {{ field.name }} = (char *)Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info.This())->GetValue()->{{ field.name }}; {% else %} - {{ field.cType }} - {% if not field.cppClassName|isV8Value %} - {% if not field.cType|isPointer %} - * + {% if field.cType|isArrayType %} + {{ field.cType|arrayTypeToPlainType }} *{{ field.name }} = + {% else %} + {{ field.cType }} + {% if not field.cppClassName|isV8Value %} + {% if not field.cType|isPointer %}*{% endif 
%} {% endif %} - {% endif %} - {{ field.name }} = - {% if not field.cppClassName|isV8Value %} - {% if not field.cType|isPointer %} - & + {{ field.name }} = + {% if not field.cppClassName|isV8Value %} + {% if field.cType|isArrayType %}{% elsif not field.cType|isPointer %} + & + {% endif %} {% endif %} {% endif %} Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info.This())->GetValue()->{{ field.name }}; {% endif %} {% partial convertToV8 field %} - info.GetReturnValue().Set(to); + info.GetReturnValue().Set(v8ConversionSlot); } + // end field block {% endif %} {% endeach %} diff --git a/generate/templates/partials/sync_function.cc b/generate/templates/partials/sync_function.cc index 5f1306daed..cf6febe67a 100644 --- a/generate/templates/partials/sync_function.cc +++ b/generate/templates/partials/sync_function.cc @@ -17,7 +17,7 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%if not arg.isReturn %} {%partial convertFromV8 arg %} {%if arg.saveArg %} - v8::Local {{ arg.name }}(info[{{ arg.jsArg }}]->ToObject()); + v8::Local {{ arg.name }}(Nan::To(info[{{ arg.jsArg }}]).ToLocalChecked()); {{ cppClassName }} *thisObj = Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info.This()); thisObj->{{ cppFunctionName }}_{{ arg.name }}.Reset({{ arg.name }}); @@ -31,10 +31,10 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { if (Nan::ObjectWrap::Unwrap<{{ cppClassName }}>(info.This())->GetValue() != NULL) { {%endif%} - giterr_clear(); + git_error_clear(); { // lock master scope start - LockMaster lockMaster( + nodegit::LockMaster lockMaster( /*asyncAction: */false {%each args|argsInfo as arg %} {%if arg.cType|isPointer%} @@ -50,7 +50,7 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { {%endeach%} ); - {%if .|hasReturnValue %} {{ return.cType }} result = {%endif%} + {%if .|hasReturnType %} {{ return.cType }} result = {%endif%} {{ cFunctionName }}( {%each args|argsInfo as arg %} {%if arg.isReturn %} @@ -67,20 +67,20 @@ NAN_METHOD({{ cppClassName }}::{{ 
cppFunctionName }}) { {%endeach%} ); - {%if .|hasReturnValue |and return.isErrorCode %} + {%if .|hasReturnType |and return.isErrorCode %} if (result != GIT_OK) { {%each args|argsInfo as arg %} - {%if arg.shouldAlloc %} - free({{ arg.name }}); - {%elsif arg | isOid %} + {%if arg | isOid %} if (info[{{ arg.jsArg }}]->IsString()) { - free({{ arg.name }}); + free((void *)from_{{ arg.name }}); } + {%elsif arg.shouldAlloc %} + free({{ arg.name }}); {%endif%} {%endeach%} - if (giterr_last()) { - return Nan::ThrowError(giterr_last()->message); + if (git_error_last()->klass != GIT_ERROR_NONE) { + return Nan::ThrowError(git_error_last()->message); } else { return Nan::ThrowError("Unknown Error"); } @@ -111,18 +111,18 @@ NAN_METHOD({{ cppClassName }}::{{ cppFunctionName }}) { } {%endif%} - v8::Local to; + v8::Local v8ConversionSlot; {%if .|returnsCount > 1 %} v8::Local toReturn = Nan::New(); {%endif%} {%each .|returnsInfo as _return %} {%partial convertToV8 _return %} {%if .|returnsCount > 1 %} - Nan::Set(toReturn, Nan::New("{{ _return.returnNameOrName }}").ToLocalChecked(), to); + Nan::Set(toReturn, Nan::New("{{ _return.returnNameOrName }}").ToLocalChecked(), v8ConversionSlot); {%endif%} {%endeach%} {%if .|returnsCount == 1 %} - return info.GetReturnValue().Set(scope.Escape(to)); + return info.GetReturnValue().Set(scope.Escape(v8ConversionSlot)); {%else%} return info.GetReturnValue().Set(scope.Escape(toReturn)); {%endif%} diff --git a/generate/templates/partials/traits.h b/generate/templates/partials/traits.h index 3e63e42e83..9f1f2eed76 100644 --- a/generate/templates/partials/traits.h +++ b/generate/templates/partials/traits.h @@ -1,8 +1,14 @@ class {{ cppClassName }}; +{% if type == 'struct' %} +class Configurable{{ cppClassName }}; +{% endif %} struct {{ cppClassName }}Traits { typedef {{ cppClassName }} cppClass; typedef {{ cType }} cType; + {% if type == 'struct' %} + typedef Configurable{{ cppClassName }} configurableCppClass; + {% endif %} static const bool 
isDuplicable = {{ dupFunction|toBool |or cpyFunction|toBool}}; static void duplicate({{ cType }} **dest, {{ cType }} *src) { @@ -17,6 +23,7 @@ struct {{ cppClassName }}Traits { {% endif %} } + static std::string className() { return "{{ cppClassName }}"; }; static const bool isSingleton = {{ isSingleton | toBool }}; static const bool isFreeable = {{ freeFunctionName | toBool}}; static void free({{ cType }} *raw) { diff --git a/generate/templates/templates/binding.gyp b/generate/templates/templates/binding.gyp index b1aada5cd6..4476c9236d 100644 --- a/generate/templates/templates/binding.gyp +++ b/generate/templates/templates/binding.gyp @@ -1,70 +1,23 @@ { - "conditions": [ - ["(OS=='win' and node_root_dir.split('\\\\')[-1].startswith('iojs')) or (OS=='mac' and node_root_dir.split('/')[-1].startswith('iojs'))", { - "conditions": [ - ["OS=='win'", { - "variables": { - "is_electron%": "1", - "openssl_include_dir%": "<(module_root_dir)\\vendor\\openssl" - } - }, { - "variables": { - "is_electron%": "1", - "openssl_include_dir%": "<(module_root_dir)/vendor/openssl" - } - }] - ], - }, { - "conditions": [ - ["OS=='win'", { - "variables": { - "is_electron%": "0", - "openssl_include_dir%": "<(node_root_dir)\\include\\node" - } - }, { - "variables": { - "is_electron%": "0", - "openssl_include_dir%": "<(node_root_dir)/include/node" - } - }] - ] - }] - ], + "variables": { + "variables": { + "target%": "none", + }, + "is_electron%": " target) { + void {{ cppClassName }}::InitializeComponent(v8::Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; - v8::Local tpl = Nan::New(JSNewFunction); + v8::Local nodegitExternal = Nan::New(nodegitContext); + v8::Local tpl = Nan::New(JSNewFunction, nodegitExternal); - tpl->InstanceTemplate()->SetInternalFieldCount(1); + tpl->InstanceTemplate()->SetInternalFieldCount(2); tpl->SetClassName(Nan::New("{{ jsClassName }}").ToLocalChecked()); {% each functions as function %} {% if not function.ignore %} {% if 
function.isPrototypeMethod %} - Nan::SetPrototypeMethod(tpl, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}); + Nan::SetPrototypeMethod(tpl, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}, nodegitExternal); {% else %} - Nan::SetMethod(tpl, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}); + Nan::SetMethod(tpl, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}, nodegitExternal); {% endif %} {% endif %} {% endeach %} {% each fields as field %} {% if not field.ignore %} - Nan::SetPrototypeMethod(tpl, "{{ field.jsFunctionName }}", {{ field.cppFunctionName }}); + Nan::SetPrototypeMethod(tpl, "{{ field.jsFunctionName }}", {{ field.cppFunctionName }}, nodegitExternal); {% endif %} {% endeach %} InitializeTemplate(tpl); - v8::Local _constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); - constructor_template.Reset(_constructor_template); - Nan::Set(target, Nan::New("{{ jsClassName }}").ToLocalChecked(), _constructor_template); + v8::Local constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); + nodegitContext->SaveToPersistent("{{ cppClassName }}::Template", constructor_template); + Nan::Set(target, Nan::New("{{ jsClassName }}").ToLocalChecked(), constructor_template); } {% else %} - void {{ cppClassName }}::InitializeComponent(v8::Local target) { + void {{ cppClassName }}::InitializeComponent(v8::Local target, nodegit::Context *nodegitContext) { Nan::HandleScope scope; + Local nodegitExternal = Nan::New(nodegitContext); - v8::Local object = Nan::New(); + {% if functions|hasFunctionOnRootProto %} + v8::Local object = Nan::New({{ functions|getCPPFunctionForRootProto }}, nodegitExternal); + {% else %} + v8::Local object = Nan::New(); + {% endif %} {% each functions as function %} {% if not function.ignore %} - Nan::SetMethod(object, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}); + Nan::SetMethod(object, "{{ function.jsFunctionName }}", {{ function.cppFunctionName }}, 
nodegitExternal); {% endif %} {% endeach %} - Nan::Set(target, Nan::New("{{ jsClassName }}").ToLocalChecked(), object); + Nan::Set( + target, + Nan::New("{{ jsClassName }}").ToLocalChecked(), + {% if functions|hasFunctionOnRootProto %} + Nan::GetFunction(object).ToLocalChecked() + {% else %} + object + {% endif %} + ); } {% endif %} diff --git a/generate/templates/templates/class_header.h b/generate/templates/templates/class_header.h index fce17e58cc..9112347825 100644 --- a/generate/templates/templates/class_header.h +++ b/generate/templates/templates/class_header.h @@ -2,17 +2,25 @@ #define {{ cppClassName|upper }}_H #include #include -#include #include -#include +#include +#include +#include +#include #include "async_baton.h" +#include "async_worker.h" +#include "cleanup_handle.h" +#include "context.h" +#include "lock_master.h" #include "nodegit_wrapper.h" #include "promise_completion.h" #include "reference_counter.h" +#include "worker_pool.h" extern "C" { #include +#include {%each cDependencies as dependency %} #include <{{ dependency }}> {%endeach%} @@ -54,7 +62,12 @@ class {{ cppClassName }} : public friend class NodeGitWrapper<{{ cppClassName }}Traits>; {%endif %} public: - static void InitializeComponent (v8::Local target); + {{ cppClassName }}(const {{ cppClassName }} &) = delete; + {{ cppClassName }}({{ cppClassName }} &&) = delete; + {{ cppClassName }} &operator=(const {{ cppClassName }} &) = delete; + {{ cppClassName }} &operator=({{ cppClassName }} &&) = delete; + + static void InitializeComponent (v8::Local target, nodegit::Context *nodegitContext); {% each functions as function %} {% if not function.ignore %} @@ -69,15 +82,18 @@ class {{ cppClassName }} : public {% endeach %} ); + static void {{ function.cppFunctionName }}_{{ arg.name }}_cancelAsync(void *baton); static void {{ function.cppFunctionName }}_{{ arg.name }}_async(void *baton); - static void {{ function.cppFunctionName }}_{{ arg.name }}_promiseCompleted(bool isFulfilled, AsyncBaton 
*_baton, v8::Local result); - struct {{ function.cppFunctionName }}_{{ arg.name|titleCase }}Baton : public AsyncBatonWithResult<{{ arg.return.type }}> { + static void {{ function.cppFunctionName }}_{{ arg.name }}_promiseCompleted(bool isFulfilled, nodegit::AsyncBaton *_baton, v8::Local result); + class {{ function.cppFunctionName }}_{{ arg.name|titleCase }}Baton : public nodegit::AsyncBatonWithResult<{{ arg.return.type }}> { + public: {% each arg.args|argsInfo as cbArg %} {{ cbArg.cType }} {{ cbArg.name }}; {% endeach %} + {{ function.cppFunctionName }}_{{ arg.name|titleCase }}Baton(const {{ arg.return.type }} &defaultResult) - : AsyncBatonWithResult<{{ arg.return.type }}>(defaultResult) { + : nodegit::AsyncBatonWithResult<{{ arg.return.type }}>(defaultResult) { } }; {% endif %} @@ -103,16 +119,6 @@ class {{ cppClassName }} : public ~{{ cppClassName }}(); {%endif%} - {% each functions as function %} - {% if not function.ignore %} - {% each function.args as arg %} - {% if arg.saveArg %} - Nan::Persistent {{ function.cppFunctionName }}_{{ arg.name }}; - {% endif %} - {% endeach %} - {% endif %} - {% endeach %} - {%each fields as field%} {%if not field.ignore%} static NAN_METHOD({{ field.cppFunctionName }}); @@ -136,17 +142,29 @@ class {{ cppClassName }} : public {%endif%} {%endif%} {%endeach%} + {% if function.return.isResultOrError %} + {% elsif function.return.isErrorCode %} + {% elsif function.return.cType != 'void' %} + {{ function.return.cType }} result; + {% endif %} }; - class {{ function.cppFunctionName }}Worker : public Nan::AsyncWorker { + class {{ function.cppFunctionName }}Worker : public nodegit::AsyncWorker { public: {{ function.cppFunctionName }}Worker( {{ function.cppFunctionName }}Baton *_baton, - Nan::Callback *callback - ) : Nan::AsyncWorker(callback) + Nan::Callback *callback, + std::map> &cleanupHandles + ) : nodegit::AsyncWorker(callback, "nodegit:AsyncWorker:{{ cppClassName }}:{{ function.cppFunctionName }}", cleanupHandles) , baton(_baton) {}; 
+ {{ function.cppFunctionName }}Worker(const {{ function.cppFunctionName }}Worker &) = delete; + {{ function.cppFunctionName }}Worker({{ function.cppFunctionName }}Worker &&) = delete; + {{ function.cppFunctionName }}Worker &operator=(const {{ function.cppFunctionName }}Worker &) = delete; + {{ function.cppFunctionName }}Worker &operator=({{ function.cppFunctionName }}Worker &&) = delete; ~{{ function.cppFunctionName }}Worker() {}; void Execute(); + void HandleErrorCallback(); void HandleOKCallback(); + nodegit::LockMaster AcquireLocks(); private: {{ function.cppFunctionName }}Baton *baton; @@ -176,6 +194,11 @@ class {{ cppClassName }} : public {%endeach%} } + {{ function.cppFunctionName }}_globalPayload(const {{ function.cppFunctionName }}_globalPayload &) = delete; + {{ function.cppFunctionName }}_globalPayload({{ function.cppFunctionName }}_globalPayload &&) = delete; + {{ function.cppFunctionName }}_globalPayload &operator=(const {{ function.cppFunctionName }}_globalPayload &) = delete; + {{ function.cppFunctionName }}_globalPayload &operator=({{ function.cppFunctionName }}_globalPayload &&) = delete; + ~{{ function.cppFunctionName }}_globalPayload() { {%each function.args as arg %} {%if arg.isCallbackFunction %} diff --git a/generate/templates/templates/nodegit.cc b/generate/templates/templates/nodegit.cc index c1eddd3283..e43f8b2ae5 100644 --- a/generate/templates/templates/nodegit.cc +++ b/generate/templates/templates/nodegit.cc @@ -5,12 +5,13 @@ #include #include #include - #include +#include #include "../include/init_ssh2.h" #include "../include/lock_master.h" #include "../include/nodegit.h" +#include "../include/context.h" #include "../include/wrapper.h" #include "../include/promise_completion.h" #include "../include/functions/copy.h" @@ -23,76 +24,22 @@ #include "../include/convenient_hunk.h" #include "../include/filter_registry.h" -#if (NODE_MODULE_VERSION > 48) - v8::Local GetPrivate(v8::Local object, - v8::Local key) { - v8::Isolate* isolate = 
v8::Isolate::GetCurrent(); - v8::Local context = isolate->GetCurrentContext(); - v8::Local privateKey = v8::Private::ForApi(isolate, key); - v8::Local value; - v8::Maybe result = object->HasPrivate(context, privateKey); - if (!(result.IsJust() && result.FromJust())) - return v8::Local(); - if (object->GetPrivate(context, privateKey).ToLocal(&value)) - return value; - return v8::Local(); - } - - void SetPrivate(v8::Local object, - v8::Local key, - v8::Local value) { - if (value.IsEmpty()) - return; - v8::Isolate* isolate = v8::Isolate::GetCurrent(); - v8::Local context = isolate->GetCurrentContext(); - v8::Local privateKey = v8::Private::ForApi(isolate, key); - object->SetPrivate(context, privateKey, value); - } -#else - v8::Local GetPrivate(v8::Local object, - v8::Local key) { - return object->GetHiddenValue(key); - } - - void SetPrivate(v8::Local object, - v8::Local key, - v8::Local value) { - object->SetHiddenValue(key, value); - } -#endif +using namespace v8; -void LockMasterEnable(const FunctionCallbackInfo& info) { - LockMaster::Enable(); +Local GetPrivate(Local object, Local key) { + Local value; + Nan::Maybe result = Nan::HasPrivate(object, key); + if (!(result.IsJust() && result.FromJust())) + return Local(); + if (Nan::GetPrivate(object, key).ToLocal(&value)) + return value; + return Local(); } -void LockMasterSetStatus(const FunctionCallbackInfo& info) { - Nan::HandleScope scope; - - // convert the first argument to Status - if(info.Length() >= 0 && info[0]->IsNumber()) { - v8::Local value = info[0]->ToInt32(v8::Isolate::GetCurrent()); - LockMaster::Status status = static_cast(value->Value()); - if(status >= LockMaster::Disabled && status <= LockMaster::Enabled) { - LockMaster::SetStatus(status); - return; - } - } - - // argument error - Nan::ThrowError("Argument must be one 0, 1 or 2"); -} - -void LockMasterGetStatus(const FunctionCallbackInfo& info) { - info.GetReturnValue().Set(Nan::New(LockMaster::GetStatus())); -} - -void 
LockMasterGetDiagnostics(const FunctionCallbackInfo& info) { - LockMaster::Diagnostics diagnostics(LockMaster::GetDiagnostics()); - - // return a plain JS object with properties - v8::Local result = Nan::New(); - result->Set(Nan::New("storedMutexesCount").ToLocalChecked(), Nan::New(diagnostics.storedMutexesCount)); - info.GetReturnValue().Set(result); +void SetPrivate(Local object, Local key, Local value) { + if (value.IsEmpty()) + return; + Nan::SetPrivate(object, key, value); } static uv_mutex_t *opensslMutexes; @@ -120,42 +67,62 @@ void OpenSSL_ThreadSetup() { CRYPTO_THREADID_set_callback(OpenSSL_IDCallback); } -ThreadPool libgit2ThreadPool(10, uv_default_loop()); +// diagnostic function +NAN_METHOD(GetNumberOfTrackedObjects) { + nodegit::Context *currentNodeGitContext = nodegit::Context::GetCurrentContext(); + assert (currentNodeGitContext != nullptr); + info.GetReturnValue().Set(currentNodeGitContext->TrackerListSize()); +} -extern "C" void init(v8::Local target) { - // Initialize thread safety in openssl and libssh2 - OpenSSL_ThreadSetup(); - init_ssh2(); - // Initialize libgit2. - git_libgit2_init(); +static std::once_flag libraryInitializedFlag; +static std::mutex libraryInitializationMutex; + +NAN_MODULE_INIT(init) { + { + // We only want to do initialization logic once, and we also want to prevent any thread from completely loading + // the module until initialization has occurred. + // All of this initialization logic ends up being shared. + const std::lock_guard lock(libraryInitializationMutex); + std::call_once(libraryInitializedFlag, []() { + // Initialize thread safety in openssl and libssh2 + OpenSSL_ThreadSetup(); + init_ssh2(); + // Initialize libgit2. 
+ git_libgit2_init(); + + // Register thread pool with libgit2 + nodegit::ThreadPool::InitializeGlobal(); + }); + } + + // Exports function 'getNumberOfTrackedObjects' + Nan::Set(target + , Nan::New("getNumberOfTrackedObjects").ToLocalChecked() + , Nan::GetFunction(Nan::New(GetNumberOfTrackedObjects)).ToLocalChecked() + ); Nan::HandleScope scope; + Local context = Nan::GetCurrentContext(); + Isolate *isolate = context->GetIsolate(); + nodegit::Context *nodegitContext = new nodegit::Context(isolate); - Wrapper::InitializeComponent(target); - PromiseCompletion::InitializeComponent(); + Wrapper::InitializeComponent(target, nodegitContext); + PromiseCompletion::InitializeComponent(nodegitContext); {% each %} - {% if type != "enum" %} - {{ cppClassName }}::InitializeComponent(target); + {% if type == 'class' %} + {{ cppClassName }}::InitializeComponent(target, nodegitContext); + {% elsif type == 'struct' %} + {% if isReturnable %} + {{ cppClassName }}::InitializeComponent(target, nodegitContext); + {% endif %} {% endif %} {% endeach %} - ConvenientHunk::InitializeComponent(target); - ConvenientPatch::InitializeComponent(target); - GitFilterRegistry::InitializeComponent(target); - - NODE_SET_METHOD(target, "enableThreadSafety", LockMasterEnable); - NODE_SET_METHOD(target, "setThreadSafetyStatus", LockMasterSetStatus); - NODE_SET_METHOD(target, "getThreadSafetyStatus", LockMasterGetStatus); - NODE_SET_METHOD(target, "getThreadSafetyDiagnostics", LockMasterGetDiagnostics); - - v8::Local threadSafety = Nan::New(); - threadSafety->Set(Nan::New("DISABLED").ToLocalChecked(), Nan::New((int)LockMaster::Disabled)); - threadSafety->Set(Nan::New("ENABLED_FOR_ASYNC_ONLY").ToLocalChecked(), Nan::New((int)LockMaster::EnabledForAsyncOnly)); - threadSafety->Set(Nan::New("ENABLED").ToLocalChecked(), Nan::New((int)LockMaster::Enabled)); - - target->Set(Nan::New("THREAD_SAFETY").ToLocalChecked(), threadSafety); + ConvenientHunk::InitializeComponent(target, nodegitContext); + 
ConvenientPatch::InitializeComponent(target, nodegitContext); + GitFilterRegistry::InitializeComponent(target, nodegitContext); - LockMaster::Initialize(); + nodegit::LockMaster::InitializeContext(); } -NODE_MODULE(nodegit, init) +NAN_MODULE_WORKER_ENABLED(nodegit, init) diff --git a/generate/templates/templates/nodegit.js b/generate/templates/templates/nodegit.js index 15b8f322c8..43e7d9de02 100644 --- a/generate/templates/templates/nodegit.js +++ b/generate/templates/templates/nodegit.js @@ -1,5 +1,11 @@ var _ = require("lodash"); -var promisify = require("promisify-node"); +var util = require("util"); +var worker; + +try { + worker = require("worker_threads"); +} catch (e) {} + var rawApi; // Attempt to load the production release first, if it fails fall back to the @@ -16,6 +22,8 @@ catch (ex) { rawApi = require("../build/Debug/nodegit.node"); } +var promisify = fn => fn && util.promisify(fn); // jshint ignore:line + // For disccussion on why `cloneDeep` is required, see: // https://github.com/facebook/jest/issues/3552 // https://github.com/facebook/jest/issues/3550 @@ -26,6 +34,13 @@ rawApi = _.cloneDeep(rawApi); // have to override them here /* jshint ignore:start */ {% each . 
as idef %} + {% if idef.type == 'struct' %} + rawApi.{{ idef.jsClassName }} = util.deprecate(function {{ idef.jsClassName }}() { + try { + require("./deprecated/structs/{{ idef.jsClassName }}").call(this, rawApi); + } catch (error) {/* allow these to be undefined */} + }, "Instantiation of {{ idef.jsClassName }} is deprecated and will be removed in an upcoming version"); + {% endif %} {% if idef.type != "enum" %} {% if idef.functions.length > 0 %} @@ -92,9 +107,7 @@ var importExtension = function(name) { // Load up utils rawApi.Utils = {}; require("./utils/lookup_wrapper"); -require("./utils/normalize_options"); require("./utils/shallow_clone"); -require("./utils/normalize_fetch_options"); // Load up extra types; require("./status_file"); @@ -132,9 +145,6 @@ importExtension("filter_registry"); {% endeach %} /* jshint ignore:end */ -// Wrap asynchronous methods to return promises. -promisify(exports); - // Set version. exports.version = require("../package").version; diff --git a/generate/templates/templates/struct_content.cc b/generate/templates/templates/struct_content.cc index 9840cfe4f2..d7355d2a29 100644 --- a/generate/templates/templates/struct_content.cc +++ b/generate/templates/templates/struct_content.cc @@ -1,3 +1,4 @@ +// generated from struct_content.cc #include #include #ifdef WIN32 @@ -28,107 +29,336 @@ using namespace v8; using namespace node; using namespace std; - -// generated from struct_content.cc -{{ cppClassName }}::{{ cppClassName }}() : NodeGitWrapper<{{ cppClassName }}Traits>(NULL, true, v8::Local()) -{ - {% if ignoreInit == true %} - this->raw = new {{ cType }}; - {% else %} - {% if isExtendedStruct %} - {{ cType }}_extended wrappedValue = {{ cType|upper }}_INIT; - this->raw = ({{ cType }}*) malloc(sizeof({{ cType }}_extended)); - memcpy(this->raw, &wrappedValue, sizeof({{ cType }}_extended)); +{% if isReturnable %} + {{ cppClassName }}::{{ cppClassName }}() : NodeGitWrapper<{{ cppClassName }}Traits>(NULL, true, v8::Local()) + { + {% if 
ignoreInit == true %} + this->raw = new {{ cType }}; {% else %} - {{ cType }} wrappedValue = {{ cType|upper }}_INIT; - this->raw = ({{ cType }}*) malloc(sizeof({{ cType }})); - memcpy(this->raw, &wrappedValue, sizeof({{ cType }})); + {% if isExtendedStruct %} + {{ cType }}_extended wrappedValue = {{ cType|upper }}_INIT; + this->raw = ({{ cType }}*) malloc(sizeof({{ cType }}_extended)); + memcpy(this->raw, &wrappedValue, sizeof({{ cType }}_extended)); + {% else %} + {{ cType }} wrappedValue = {{ cType|upper }}_INIT; + this->raw = ({{ cType }}*) malloc(sizeof({{ cType }})); + memcpy(this->raw, &wrappedValue, sizeof({{ cType }})); + {% endif %} {% endif %} - {% endif %} - this->ConstructFields(); -} + this->ConstructFields(); + } + + {{ cppClassName }}::{{ cppClassName }}({{ cType }}* raw, bool selfFreeing, v8::Local owner) + : NodeGitWrapper<{{ cppClassName }}Traits>(raw, selfFreeing, owner) + { + this->ConstructFields(); + } + + {{ cppClassName }}::~{{ cppClassName }}() { + {% each fields|fieldsInfo as field %} + {% if not field.ignore %} + {% if not field.isEnum %} + {% if field.isLibgitType %} + this->{{ field.name }}.Reset(); + {% endif %} + {% endif %} + {% endif %} + {% endeach %} + } + + void {{ cppClassName }}::ConstructFields() { + {% each fields|fieldsInfo as field %} + {% if not field.ignore %} + {% if not field.isEnum %} + {% if field.isLibgitType %} + v8::Local {{ field.name }}Temp = Nan::To({{ field.cppClassName }}::New( + {%if not field.cType|isPointer %}&{%endif%}this->raw->{{ field.name }}, + false + )).ToLocalChecked(); + this->{{ field.name }}.Reset({{ field.name }}Temp); + {% endif %} + {% endif %} + {% endif %} + {% endeach %} + } + + void {{ cppClassName }}::InitializeComponent(Local target, nodegit::Context *nodegitContext) { + Nan::HandleScope scope; + + Local nodegitExternal = Nan::New(nodegitContext); + Local tpl = Nan::New(JSNewFunction, nodegitExternal); + + tpl->InstanceTemplate()->SetInternalFieldCount(2); + 
tpl->SetClassName(Nan::New("{{ jsClassName }}").ToLocalChecked()); + + {% each fields as field %} + {% if not field.ignore %} + {% if not field | isPayload %} + Nan::SetAccessor(tpl->InstanceTemplate(), Nan::New("{{ field.jsFunctionName }}").ToLocalChecked(), Get{{ field.cppFunctionName}}, Set{{ field.cppFunctionName}}, nodegitExternal); + {% endif %} + {% endif %} + {% endeach %} + + InitializeTemplate(tpl); + + v8::Local constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); + nodegitContext->SaveToPersistent("{{ cppClassName }}::Template", constructor_template); + } -{{ cppClassName }}::{{ cppClassName }}({{ cType }}* raw, bool selfFreeing, v8::Local owner) - : NodeGitWrapper<{{ cppClassName }}Traits>(raw, selfFreeing, owner) + {% partial fieldAccessors . %} + + // force base class template instantiation, to make sure we get all the + // methods, statics, etc. + template class NodeGitWrapper<{{ cppClassName }}Traits>; + +{% endif %} + +Configurable{{ cppClassName }}::Configurable{{ cppClassName }}(nodegit::Context *nodegitContext) + : nodegit::ConfigurableClassWrapper<{{ cppClassName }}Traits>(nodegitContext) { - this->ConstructFields(); + {% if ignoreInit == true %} + this->raw = ({{ cType }}*) malloc(sizeof({{ cType }})); + {% else %} + {{ cType }}{% if isExtendedStruct %}_extended{% endif %} wrappedValue = {{ cType|upper }}_INIT; + this->raw = ({{ cType }}*) malloc(sizeof({{ cType }}{% if isExtendedStruct %}_extended{% endif %})); + memcpy(this->raw, &wrappedValue, sizeof({{ cType }}{% if isExtendedStruct %}_extended{% endif %})); + {% endif %} } -{{ cppClassName }}::~{{ cppClassName }}() { +Configurable{{ cppClassName }}::~Configurable{{ cppClassName }}() { {% each fields|fieldsInfo as field %} {% if not field.ignore %} - {% if not field.isEnum %} - {% if field.isCallbackFunction %} - if (this->{{ field.name }}.HasCallback()) { - {% if isExtendedStruct %} - (({{ cType }}_extended *)this->raw)->payload = NULL; - {% else %} - this->raw->{{ 
fields|payloadFor field.name }} = NULL; - {% endif %} + {% if field.cppClassName == 'GitStrarray' %} + if (this->raw->{{ field.name }}.count) { + for (size_t i = 0; i < this->raw->{{ field.name }}.count; ++i) { + free(this->raw->{{ field.name }}.strings[i]); } - {% endif %} + free(this->raw->{{ field.name }}.strings); + } + {% elsif field.cppClassName == 'String' %} + free((void*)this->raw->{{ field.name }}); {% endif %} {% endif %} {% endeach %} } -void {{ cppClassName }}::ConstructFields() { - {% each fields|fieldsInfo as field %} +nodegit::ConfigurableClassWrapper<{{ cppClassName }}Traits>::v8ConversionResult Configurable{{ cppClassName }}::fromJavascript(nodegit::Context *nodegitContext, v8::Local input) { + if (!input->IsObject()) { + return { + "Must pass object for Configurable{{ cppClassName }}" + }; + } + + Nan::HandleScope scope; + v8::Local inputObj = input.As(); + std::shared_ptr output(new Configurable{{ cppClassName }}(nodegitContext)); + + // unpack the data into the correct fields + {% each fields as field %} {% if not field.ignore %} - {% if not field.isEnum %} - {% if field.hasConstructor |or field.isLibgitType %} - v8::Local {{ field.name }}Temp = {{ field.cppClassName }}::New( - {%if not field.cType|isPointer %}&{%endif%}this->raw->{{ field.name }}, - false - )->ToObject(); - this->{{ field.name }}.Reset({{ field.name }}Temp); - - {% elsif field.isCallbackFunction %} - - // Set the static method call and set the payload for this function to be - // the current instance - this->raw->{{ field.name }} = NULL; - {% if isExtendedStruct %} - (({{ cType }}_extended *)this->raw)->payload = (void *)this; - {% else %} - this->raw->{{ fields|payloadFor field.name }} = (void *)this; - {% endif %} - {% elsif field.payloadFor %} + {% if field.isClassType %} + {% if field.cppClassName == 'GitOid' %} + { + v8::Local maybeOid = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeOid.IsEmpty() && !maybeOid->IsUndefined() && 
!maybeOid->IsNull()) { + if (maybeOid->IsString()) { + Nan::Utf8String oidString(maybeOid.As()); + if (git_oid_fromstr(&output->raw->{{ field.name }}, *oidString) != GIT_OK) { + return { + git_error_last()->message + }; + } + } else if (maybeOid->IsObject()) { + if (git_oid_cpy(&output->raw->{{ field.name }}, Nan::ObjectWrap::Unwrap<{{ field.cppClassName }}>(maybeOid.As())->GetValue()) != GIT_OK) { + return { + git_error_last()->message + }; + } + } else { + return { + "Must pass String or NodeGit.Oid to {{ field.jsFunctionName }}" + }; + } + } + } + {% elsif field.cppClassName == 'GitStrarray' %} + output->raw->{{ field.name }}.count = 0; + output->raw->{{ field.name }}.strings = nullptr; + + { + v8::Local maybeStrarray = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeStrarray.IsEmpty() && !maybeStrarray->IsUndefined() && !maybeStrarray->IsNull()) { + if (maybeStrarray->IsArray()) { + v8::Local strarrayValue = maybeStrarray.As(); + // validate the StrArray is indeed a list of strings + for (uint32_t i = 0; i < strarrayValue->Length(); ++i) { + // TODO confirm that sparse array at least boils down to undefined + v8::Local arrayValue = Nan::Get(strarrayValue, i).ToLocalChecked(); + if (!arrayValue->IsString()) { + return { + "Must pass String or Array of strings to {{ field.jsFunctionName }}" + }; + } + } + + StrArrayConverter::ConvertInto(&output->raw->{{ field.name }}, strarrayValue); + } else if (maybeStrarray->IsString()) { + v8::Local strarrayValue = maybeStrarray.As(); + StrArrayConverter::ConvertInto(&output->raw->{{ field.name }}, strarrayValue); + } else { + return { + "Must pass String or Array of strings to {{ field.jsFunctionName }}" + }; + } + } + } + {% else %} + { + v8::Local maybeObject = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeObject.IsEmpty() && !maybeObject->IsUndefined() && !maybeObject->IsNull()) { + if (!maybeObject->IsObject()) { + return { + "Must pass NodeGit.{{ field.jsClassName 
}} to {{ field.jsFunctionName }}" + }; + } - v8::Local {{ field.name }} = Nan::Undefined(); - this->{{ field.name }}.Reset({{ field.name }}); + v8::Local objectValue = maybeObject.As(); + output->raw->{{ field.name }} = Nan::ObjectWrap::Unwrap<{{ field.cppClassName }}>(objectValue)->GetValue(); + output->{{ field.jsFunctionName }}.Reset(objectValue); + } + } {% endif %} - {% endif %} - {% endif %} - {% endeach %} -} + {% elsif field.isCallbackFunction %} + { + v8::Local maybeCallback = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeCallback.IsEmpty() && !maybeCallback->IsUndefined() && !maybeCallback->IsNull()) { + if (!maybeCallback->IsFunction() && !maybeCallback->IsObject()) { + return { + "Must pass Function or CallbackSpecifier to {{ field.jsFunctionName}}" + }; + } -void {{ cppClassName }}::InitializeComponent(v8::Local target) { - Nan::HandleScope scope; + std::unique_ptr callback; + uint32_t throttle = {% if field.return.throttle %}{{ field.return.throttle }}{% else %}0{% endif %}; + bool waitForResult = true; - v8::Local tpl = Nan::New(JSNewFunction); + if (maybeCallback->IsFunction()) { + callback.reset(new Nan::Callback(maybeCallback.As())); + } else { + v8::Local callbackSpecifier = maybeCallback.As(); + v8::Local maybeCallback = nodegit::safeGetField(callbackSpecifier, "callback"); + if (maybeCallback.IsEmpty() || !maybeCallback->IsFunction()) { + return { + "Must pass callback to CallbackSpecifier" + }; + } - tpl->InstanceTemplate()->SetInternalFieldCount(1); - tpl->SetClassName(Nan::New("{{ jsClassName }}").ToLocalChecked()); + callback.reset(new Nan::Callback(maybeCallback.As())); - {% each fields as field %} - {% if not field.ignore %} - {% if not field | isPayload %} - Nan::SetAccessor(tpl->InstanceTemplate(), Nan::New("{{ field.jsFunctionName }}").ToLocalChecked(), Get{{ field.cppFunctionName}}, Set{{ field.cppFunctionName}}); - {% endif %} + v8::Local maybeThrottle = nodegit::safeGetField(callbackSpecifier, 
"throttle"); + if (!maybeThrottle.IsEmpty() && !maybeThrottle->IsUndefined() && !maybeThrottle->IsNull()) { + if (!maybeThrottle->IsNumber()) { + return { + "Must pass zero or positive number as throttle to CallbackSpecifier" + }; + } + + throttle = maybeThrottle->Uint32Value(Nan::GetCurrentContext()).FromJust(); + } + + v8::Local maybeWaitForResult = nodegit::safeGetField(callbackSpecifier, "waitForResult"); + if (!maybeWaitForResult.IsEmpty() && !maybeWaitForResult->IsUndefined() && !maybeWaitForResult->IsNull()) { + if (!maybeWaitForResult->IsBoolean()) { + return { + "Must pass a boolean as waitForResult to callbackSpecifier" + }; + } + + waitForResult = Nan::To(maybeWaitForResult).FromJust(); + } + } + + output->{{ field.jsFunctionName }}.SetCallback(std::move(callback), throttle, waitForResult); + output->raw->{{ field.name }} = ({{ field.cType }}){{ field.jsFunctionName }}_cppCallback; + } + } + {% elsif field.isStructType %} + { + v8::Local maybeNestedObject = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeNestedObject.IsEmpty() && !maybeNestedObject->IsUndefined() && !maybeNestedObject->IsNull()) { + auto conversionResult = Configurable{{ field.cppClassName }}::fromJavascript(nodegitContext, maybeNestedObject); + if (!conversionResult.result) { + std::string error = "Failed to set {{ field.jsFunctionName }}: "; + error += conversionResult.error; + return { + error + }; + } + + auto child = conversionResult.result; + output->childCleanupVector.push_back(child); + output->raw->{{ field.name }} = *child->GetValue(); + } + } + {% elsif field.payloadFor %} + output->raw->{{ field.name }} = (void *)output.get(); + {% elsif field.cppClassName == 'String' %} + output->raw->{{ field.name }} = nullptr; + { + v8::Local maybeString = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeString.IsEmpty() && !maybeString->IsUndefined() && !maybeString->IsNull()) { + if (!maybeString->IsString()) { + return { + "Must pass 
string to {{ field.jsFunctionName }}" + }; + } + + Nan::Utf8String utf8String(maybeString.As()); + output->raw->{{ field.name }} = strdup(*utf8String); + } + } + {% elsif field.isCppClassIntType %} + { + v8::Local maybeNumber = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeNumber.IsEmpty() && !maybeNumber->IsUndefined() && !maybeNumber->IsNull()) { + if (!maybeNumber->IsNumber()) { + return { + "Must pass {{ field.cppClassName }} to {{ field.jsFunctionName }}" + }; + } + + output->raw->{{ field.name }} = maybeNumber->{{ field.cppClassName }}Value(); + } + } + {% else %} + { + v8::Local maybeNumber = nodegit::safeGetField(inputObj, "{{ field.jsFunctionName }}"); + if (!maybeNumber.IsEmpty() && !maybeNumber->IsUndefined() && !maybeNumber->IsNull()) { + if (!maybeNumber->IsNumber()) { + return { + "Must pass Int32 to {{ field.jsFunctionName }}" + }; + } + + output->raw->{{ field.name }} = static_cast<{{ field.cType }}>(maybeNumber->Int32Value(Nan::GetCurrentContext()).FromJust()); + } + } + {% endif %} {% endif %} {% endeach %} - InitializeTemplate(tpl); + {% if isExtendedStruct %} + (({{ cType }}_extended *)output->raw)->payload = (void *)output.get(); + {% endif %} - v8::Local _constructor_template = Nan::GetFunction(tpl).ToLocalChecked(); - constructor_template.Reset(_constructor_template); - Nan::Set(target, Nan::New("{{ jsClassName }}").ToLocalChecked(), _constructor_template); + return { + output + }; } -{% partial fieldAccessors . %} +{% partial configurableCallbacks %} // force base class template instantiation, to make sure we get all the // methods, statics, etc. 
-template class NodeGitWrapper<{{ cppClassName }}Traits>; +template class nodegit::ConfigurableClassWrapper<{{ cppClassName }}Traits>; diff --git a/generate/templates/templates/struct_header.h b/generate/templates/templates/struct_header.h index 568bcfc91d..ac05fb3543 100644 --- a/generate/templates/templates/struct_header.h +++ b/generate/templates/templates/struct_header.h @@ -1,18 +1,22 @@ +// generated from struct_header.h #ifndef {{ cppClassName|upper }}_H #define {{ cppClassName|upper }}_H #include #include -#include #include -#include #include "async_baton.h" +#include "async_worker.h" #include "callback_wrapper.h" +#include "context.h" #include "reference_counter.h" #include "nodegit_wrapper.h" +#include "configurable_class_wrapper.h" +#include "v8_helpers.h" extern "C" { #include + #include {% each cDependencies as dependency %} #include <{{ dependency }}> {% endeach %} @@ -32,77 +36,120 @@ struct {{ cType }}_extended { void* payload; }; {% endif %} -class {{ cppClassName }} : public NodeGitWrapper<{{ cppClassName }}Traits> { - // grant full access to base class - friend class NodeGitWrapper<{{ cppClassName }}Traits>; - public: - {{ cppClassName }}({{ cType }}* raw, bool selfFreeing, v8::Local owner = v8::Local()); - static void InitializeComponent (v8::Local target); - - {% each fields as field %} - {% if not field.ignore %} - {% if field.isCallbackFunction %} - static {{ field.return.type }} {{ field.name }}_cppCallback ( +{% if isReturnable %} + class {{ cppClassName }} : public NodeGitWrapper<{{ cppClassName }}Traits> { + // grant full access to base class + friend class NodeGitWrapper<{{ cppClassName }}Traits>; + + public: + {{ cppClassName }}({{ cType }}* raw, bool selfFreeing, v8::Local owner = v8::Local()); + {{ cppClassName }}(const {{ cppClassName }} &) = delete; + {{ cppClassName }}({{ cppClassName }} &&) = delete; + {{ cppClassName }} &operator=(const {{ cppClassName }} &) = delete; + {{ cppClassName }} &operator=({{ cppClassName }} &&) = 
delete; + static void InitializeComponent (v8::Local target, nodegit::Context *nodegitContext); + + private: + {{ cppClassName }}(); + ~{{ cppClassName }}(); + + void ConstructFields(); + + {% each fields as field %} + {% if not field.ignore %} + {% if not field.isEnum %} + {% if field.isLibgitType %} + Nan::Global {{ field.name }}; + {% endif %} + {% endif %} + + static NAN_GETTER(Get{{ field.cppFunctionName }}); + static NAN_SETTER(Set{{ field.cppFunctionName }}); + + {% endif %} + {% endeach %} + }; +{% endif %} + +class Configurable{{ cppClassName }} : public nodegit::ConfigurableClassWrapper<{{ cppClassName }}Traits> { + friend class nodegit::ConfigurableClassWrapper<{{ cppClassName }}Traits>; + +public: + static v8ConversionResult fromJavascript(nodegit::Context *nodegitContext, v8::Local input); + ~Configurable{{ cppClassName }}(); + + Configurable{{ cppClassName }}(const Configurable{{ cppClassName }} &) = delete; + Configurable{{ cppClassName }}(Configurable{{ cppClassName }} &&) = delete; + Configurable{{ cppClassName }} &operator=(const Configurable{{ cppClassName }} &) = delete; + Configurable{{ cppClassName }} &operator=(Configurable{{ cppClassName }} &&) = delete; + + {% each fields as field %} + {% if not field.ignore %} + {% if field.isCallbackFunction %} + static {{ field.return.type }} {{ field.jsFunctionName }}_cppCallback ( + {% each field.args|argsInfo as arg %} + {{ arg.cType }} {{ arg.name}} + {% if not arg.lastArg %} + , + {% endif %} + {% endeach %} + ); + + static void {{ field.jsFunctionName }}_cancelAsync(void *baton); + static void {{ field.jsFunctionName }}_async(void *baton); + static void {{ field.jsFunctionName }}_promiseCompleted(bool isFulfilled, nodegit::AsyncBaton *_baton, v8::Local result); + {% if field.return.type == 'void' %} + class {{ field.name|titleCase }}Baton : public nodegit::AsyncBatonWithNoResult { + public: {% each field.args|argsInfo as arg %} - {{ arg.cType }} {{ arg.name}} - {% if not arg.lastArg %} - , - {% 
endif %} + {{ arg.cType }} {{ arg.name }}; {% endeach %} - ); - - static void {{ field.name }}_async(void *baton); - static void {{ field.name }}_promiseCompleted(bool isFulfilled, AsyncBaton *_baton, v8::Local result); - {% if field.return.type == 'void' %} - struct {{ field.name|titleCase }}Baton : public AsyncBatonWithNoResult { - {% each field.args|argsInfo as arg %} - {{ arg.cType }} {{ arg.name }}; - {% endeach %} - - {{ field.name|titleCase }}Baton() - : AsyncBatonWithNoResult() { - } - }; - {% else %} - struct {{ field.name|titleCase }}Baton : public AsyncBatonWithResult<{{ field.return.type }}> { - {% each field.args|argsInfo as arg %} - {{ arg.cType }} {{ arg.name }}; - {% endeach %} - - {{ field.name|titleCase }}Baton(const {{ field.return.type }} &defaultResult) - : AsyncBatonWithResult<{{ field.return.type }}>(defaultResult) { - } - }; - {% endif %} - static {{ cppClassName }} * {{ field.name }}_getInstanceFromBaton ( - {{ field.name|titleCase }}Baton *baton); + + {{ field.name|titleCase }}Baton() + : nodegit::AsyncBatonWithNoResult() { + } + }; + {% else %} + class {{ field.name|titleCase }}Baton : public nodegit::AsyncBatonWithResult<{{ field.return.type }}> { + public: + {% each field.args|argsInfo as arg %} + {{ arg.cType }} {{ arg.name }}; + {% endeach %} + + {{ field.name|titleCase }}Baton(const {{ field.return.type }} &defaultResult) + : nodegit::AsyncBatonWithResult<{{ field.return.type }}>(defaultResult) { + } + }; {% endif %} + static Configurable{{ cppClassName }} * {{ field.jsFunctionName }}_getInstanceFromBaton ( + {{ field.name|titleCase }}Baton *baton); {% endif %} - {% endeach %} - - private: - {{ cppClassName }}(); - ~{{ cppClassName }}(); - - void ConstructFields(); - - {% each fields as field %} - {% if not field.ignore %} - {% if not field.isEnum %} - {% if field.isLibgitType %} - Nan::Persistent {{ field.name }}; - {% elsif field.isCallbackFunction %} - CallbackWrapper {{ field.name }}; - {% elsif field.payloadFor %} - 
Nan::Persistent {{ field.name }}; - {% endif %} - {% endif %} + {% endif %} + {% endeach %} - static NAN_GETTER(Get{{ field.cppFunctionName }}); - static NAN_SETTER(Set{{ field.cppFunctionName }}); +private: + Configurable{{ cppClassName }}(nodegit::Context *nodegitContext); + Configurable{{ cppClassName }}() = delete; + Nan::Global promiseError; + {% each fields as field %} + {% if not field.ignore %} + {% if not field.isEnum %} + {% if field.isClassType %} + {% if field.cppClassName == 'GitOid' %} + {%-- We do not need to generate anything here --%} + {% elsif field.cppClassName == 'GitStrarray' %} + {%-- We do not need to generate anything here --%} + {% else %} + Nan::Global {{ field.jsFunctionName }}; + {% endif %} + {% elsif field.isCallbackFunction %} + CallbackWrapper {{ field.jsFunctionName }}; + {% endif %} {% endif %} - {% endeach %} + {% endif %} + {% endeach %} + }; #endif diff --git a/guides/cloning/README.md b/guides/cloning/README.md index 2b1fa508a7..8b8390f236 100644 --- a/guides/cloning/README.md +++ b/guides/cloning/README.md @@ -85,7 +85,7 @@ to passthrough the certificate check. ``` javascript cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; } + certificateCheck: function() { return 0; } } }; ``` diff --git a/guides/cloning/gh-two-factor/README.md b/guides/cloning/gh-two-factor/README.md index a3b8bbb3fe..a6d24d40dd 100644 --- a/guides/cloning/gh-two-factor/README.md +++ b/guides/cloning/gh-two-factor/README.md @@ -101,7 +101,7 @@ to passthrough the certificate check. 
``` javascript cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; } + certificateCheck: function() { return 0; } } }; ``` @@ -119,7 +119,7 @@ The `fetchOpts` object now looks like this: ``` javascript cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; }, + certificateCheck: function() { return 0; }, credentials: function() { return NodeGit.Cred.userpassPlaintextNew(GITHUB_TOKEN, "x-oauth-basic"); } diff --git a/guides/cloning/gh-two-factor/index.js b/guides/cloning/gh-two-factor/index.js index d723e52ccf..945aac3514 100644 --- a/guides/cloning/gh-two-factor/index.js +++ b/guides/cloning/gh-two-factor/index.js @@ -22,9 +22,9 @@ var cloneOptions = {}; // with libgit2 being able to verify certificates from GitHub. cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; }, + certificateCheck: function() { return 0; }, credentials: function() { - return NodeGit.Cred.userpassPlaintextNew(GITHUB_TOKEN, "x-oauth-basic"); + return NodeGit.Credential.userpassPlaintextNew(GITHUB_TOKEN, "x-oauth-basic"); } } }; diff --git a/guides/cloning/index.js b/guides/cloning/index.js index f6b7c7a370..ec455fbd7f 100644 --- a/guides/cloning/index.js +++ b/guides/cloning/index.js @@ -18,7 +18,7 @@ var cloneOptions = {}; // with libgit2 being able to verify certificates from GitHub. cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; } + certificateCheck: function() { return 0; } } }; diff --git a/guides/cloning/ssh-with-agent/README.md b/guides/cloning/ssh-with-agent/README.md index b2cfbe8ce0..46a72b823a 100644 --- a/guides/cloning/ssh-with-agent/README.md +++ b/guides/cloning/ssh-with-agent/README.md @@ -83,7 +83,7 @@ to passthrough the certificate check. 
``` javascript cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; } + certificateCheck: function() { return 0; } } }; ``` @@ -102,7 +102,7 @@ The `fetchOpts` object now looks like this: ``` javascript cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; }, + certificateCheck: function() { return 0; }, credentials: function(url, userName) { return NodeGit.Cred.sshKeyFromAgent(userName); } diff --git a/guides/cloning/ssh-with-agent/index.js b/guides/cloning/ssh-with-agent/index.js index f3926392c6..655f07e242 100644 --- a/guides/cloning/ssh-with-agent/index.js +++ b/guides/cloning/ssh-with-agent/index.js @@ -17,13 +17,13 @@ var cloneOptions = {}; // with libgit2 being able to verify certificates from GitHub. cloneOptions.fetchOpts = { callbacks: { - certificateCheck: function() { return 1; }, + certificateCheck: function() { return 0; }, // Credentials are passed two arguments, url and username. We forward the // `userName` argument to the `sshKeyFromAgent` function to validate // authentication. credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); } } }; diff --git a/guides/install/from-source/README.md b/guides/install/from-source/README.md index b14d7471a3..64bfce18b3 100644 --- a/guides/install/from-source/README.md +++ b/guides/install/from-source/README.md @@ -64,6 +64,16 @@ npm install nodegit --msvs_version=2013 # Or whatever version you've installed. ``` +### Electron and OpenSSL ### +A local version of OpenSSL is required when building for Electron. This is due to Electron using BoringSSL, as we are not able to link to it like we are OpenSSL in Node. + +`acquireOpenSSL.js` will attempt to download OpenSSL prebuilts from S3. If preferred, it can also be built locally by setting the environment variable `npm_config_openssl_bin_url=skip`. On macOS, this should Just Work(tm). 
On Windows, things are a little trickier. + +- We rely on the Visual Studio dev tools to be installed, specifically `vcvarsall.bat` to provide access to the tools. If this is not in the default location for VS2017, you'll need to `npm config set vcvarsall_path <path to vcvarsall.bat>` or set the environment variable `npm_config_vcvarsall_path` pointing to it. +- See [Compilation and Installation](https://wiki.openssl.org/index.php/Compilation_and_Installation#Windows) regarding required dependencies, specifically `Perl` (Strawberry Perl is known to work) and `NASM`. Make sure they're on the PATH. + +Alternatively, you can provide your own OpenSSL binaries and headers. These can either go in `vendor/openssl` (e.g. `/vendor/openssl/{lib,bin,include}` should exist) or in an external directory located by `npm config set openssl_dir <path>` or the environment variable `npm_config_openssl_dir`. Additionally, you can `npm config set openssl_bin_url <url>` or the environment variable `npm_config_openssl_bin_url` to download and extract prebuilt binaries (only supports tar.gz files). `npm config set openssl_bin_sha256 <sha256>` or the environment variable `npm_config_openssl_bin_sha256` can be set to verify the download. + + ##### A note on environment variables in Windows ##### In many of the npm scripts (and examples above), things are run like `BUILD_ONLY=true npm install`.
This sets the `BUILD_ONLY` environment variable diff --git a/lib/attr.js b/lib/attr.js new file mode 100644 index 0000000000..8ecdd5eeeb --- /dev/null +++ b/lib/attr.js @@ -0,0 +1,20 @@ +var util = require("util"); +var NodeGit = require("../"); + +NodeGit.Attr.STATES = {}; +var DEPRECATED_STATES = { + UNSPECIFIED_T: "UNSPECIFIED", + TRUE_T: "TRUE", + FALSE_T: "FALSE", + VALUE_T: "STRING" +}; + +Object.keys(DEPRECATED_STATES).forEach((key) => { + const newKey = DEPRECATED_STATES[key]; + Object.defineProperty(NodeGit.Attr.STATES, key, { + get: util.deprecate( + () => NodeGit.Attr.VALUE[newKey], + `Use NodeGit.Attr.VALUE.${newKey} instead of NodeGit.Attr.STATES.${key}.` + ) + }); +}); diff --git a/lib/blame.js b/lib/blame.js deleted file mode 100644 index 7893d0c87c..0000000000 --- a/lib/blame.js +++ /dev/null @@ -1,19 +0,0 @@ -var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; -var Blame = NodeGit.Blame; - -var _file = Blame.file; - -/** - * Retrieve the blame of a file - * - * @async - * @param {Repository} repo that contains the file - * @param {String} path to the file to get the blame of - * @param {BlameOptions} [options] Options for the blame - */ -Blame.file = function(repo, path, options) { - options = normalizeOptions(options, NodeGit.BlameOptions); - - return _file.call(this, repo, path, options); -}; diff --git a/lib/blob.js b/lib/blob.js index 073623fdd4..be63312485 100644 --- a/lib/blob.js +++ b/lib/blob.js @@ -1,8 +1,11 @@ +var util = require("util"); var NodeGit = require("../"); var Blob = NodeGit.Blob; var LookupWrapper = NodeGit.Utils.lookupWrapper; var TreeEntry = NodeGit.TreeEntry; +var _filteredContent = Blob.filteredContent; + /** * Retrieves the blob pointed to by the oid * @async @@ -40,3 +43,9 @@ Blob.prototype.filemode = function() { Blob.prototype.toString = function() { return this.content().toString(); }; + +Blob.filteredContent = util.deprecate( + _filteredContent, + "NodeGit.Blob.filteredContent is 
deprecated" + + " use NodeGit.Blob.prototype.filter instead." +); diff --git a/lib/buf.js b/lib/buf.js new file mode 100644 index 0000000000..aec6ba693d --- /dev/null +++ b/lib/buf.js @@ -0,0 +1,43 @@ +var util = require("util"); +var NodeGit = require("../"); +var Buf = NodeGit.Buf; + +var _set = Buf.prototype.set; +var _grow = Buf.prototype.grow; +var _isBinary = Buf.prototype.isBinary; +var _containsNul = Buf.prototype.containsNul; + +/** + * Sets the content of a GitBuf to a string. + * @param {string} The utf8 value to set in the buffer. + * The string will be null terminated. + */ +var _setString = function(content) { + const buf = Buffer.from(content + "\0", "utf8"); + this.set(buf, buf.length); +}; + +Buf.prototype.set = util.deprecate( + _set, + "NodeGit.Buf.prototype.set is deprecated." +); + +Buf.prototype.setString = util.deprecate( + _setString, + "NodeGit.Buf.prototype.setString is deprecated." +); + +Buf.prototype.grow = util.deprecate( + _grow, + "NodeGit.Buf.prototype.grow is deprecated." +); + +Buf.prototype.isBinary = util.deprecate( + _isBinary, + "NodeGit.Buf.prototype.isBinary is deprecated." +); + +Buf.prototype.containsNul = util.deprecate( + _containsNul, + "NodeGit.Buf.prototype.containsNul is deprecated." +); \ No newline at end of file diff --git a/lib/checkout.js b/lib/checkout.js deleted file mode 100644 index dd94b3cfb5..0000000000 --- a/lib/checkout.js +++ /dev/null @@ -1,51 +0,0 @@ -var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; - -var Checkout = NodeGit.Checkout; -var _head = Checkout.head; -var _index = Checkout.index; -var _tree = Checkout.tree; - -/** -* Patch head checkout to automatically coerce objects. 
-* -* @async -* @param {Repository} repo The repo to checkout head -* @param {CheckoutOptions} [options] Options for the checkout -* @return {Void} checkout complete -*/ -Checkout.head = function(url, options) { - options = normalizeOptions(options || {}, NodeGit.CheckoutOptions); - - return _head.call(this, url, options); -}; - -/** -* Patch index checkout to automatically coerce objects. -* -* @async -* @param {Repository} repo The repo to checkout an index -* @param {Index} index The index to checkout -* @param {CheckoutOptions} [options] Options for the checkout -* @return {Void} checkout complete -*/ -Checkout.index = function(repo, index, options) { - options = normalizeOptions(options || {}, NodeGit.CheckoutOptions); - - return _index.call(this, repo, index, options); -}; - -/** -* Patch tree checkout to automatically coerce objects. -* -* @async -* @param {Repository} repo -* @param {String|Tree|Commit|Reference} treeish -* @param {CheckoutOptions} [options] -* @return {Void} checkout complete -*/ -Checkout.tree = function(repo, treeish, options) { - options = normalizeOptions(options || {}, NodeGit.CheckoutOptions); - - return _tree.call(this, repo, treeish, options); -}; diff --git a/lib/cherrypick.js b/lib/cherrypick.js deleted file mode 100644 index cf003c2a5f..0000000000 --- a/lib/cherrypick.js +++ /dev/null @@ -1,73 +0,0 @@ -var NodeGit = require("../"); -var shallowClone = NodeGit.Utils.shallowClone; -var normalizeOptions = NodeGit.Utils.normalizeOptions; - -var Cherrypick = NodeGit.Cherrypick; -var _cherrypick = Cherrypick.cherrypick; -var _commit = Cherrypick.commit; - -/** -* Cherrypick a commit and, changing the index and working directory -* -* @async -* @param {Repository} repo The repo to checkout head -* @param {Commit} commit The commit to cherrypick -* @param {CherrypickOptions} [options] Options for the cherrypick -* @return {int} 0 on success, -1 on failure -*/ -Cherrypick.cherrypick = function(repo, commit, options) { - var mergeOpts; - 
var checkoutOpts; - - if (options) { - options = shallowClone(options); - mergeOpts = options.mergeOpts; - checkoutOpts = options.checkoutOpts; - delete options.mergeOpts; - delete options.checkoutOpts; - } - - options = normalizeOptions(options, NodeGit.CherrypickOptions); - - if (mergeOpts) { - options.mergeOpts = - normalizeOptions(mergeOpts, NodeGit.MergeOptions); - } - - if (checkoutOpts) { - options.checkoutOpts = - normalizeOptions(checkoutOpts, NodeGit.CheckoutOptions); - } - - return _cherrypick.call(this, repo, commit, options); -}; - -/** -* Cherrypicks the given commit against "our" commit, producing an index that -* reflects the result of the cherrypick. The index is not backed by a repo. -* -* @async -* @param {Repository} repo The repo to cherrypick commits -* @param {Commit} cherrypick_commit The commit to cherrypick -* @param {Commit} our_commit The commit to revert against -* @param {int} mainline The parent of the revert commit (1 or -* 2) if it's a merge, 0 otherwise -* @param {MergeOptions} [merge_options] Merge options for the cherrypick -* @return {int} 0 on success, -1 on failure -*/ -Cherrypick.commit = function( - repo, - cherrypick_commit, - our_commit, - mainline, - merge_options) { - merge_options = normalizeOptions(merge_options, NodeGit.MergeOptions); - - return _commit.call( - this, - repo, - cherrypick_commit, - our_commit, - mainline, - merge_options); -}; diff --git a/lib/clone.js b/lib/clone.js deleted file mode 100644 index c6b6be5991..0000000000 --- a/lib/clone.js +++ /dev/null @@ -1,33 +0,0 @@ -var NodeGit = require("../"); -var shallowClone = NodeGit.Utils.shallowClone; -var normalizeFetchOptions = NodeGit.Utils.normalizeFetchOptions; -var normalizeOptions = NodeGit.Utils.normalizeOptions; - -var Clone = NodeGit.Clone; -var _clone = Clone.clone; - -/** - * Patch repository cloning to automatically coerce objects. 
- * - * @async - * @param {String} url url of the repository - * @param {String} local_path local path to store repository - * @param {CloneOptions} [options] - * @return {Repository} repo - */ -Clone.clone = function(url, local_path, options) { - var fetchOpts = normalizeFetchOptions(options && options.fetchOpts); - - if (options) { - options = shallowClone(options); - delete options.fetchOpts; - } - - options = normalizeOptions(options, NodeGit.CloneOptions); - - if (options) { - options.fetchOpts = fetchOpts; - } - - return _clone.call(this, url, local_path, options); -}; diff --git a/lib/commit.js b/lib/commit.js index 232d771b0b..8eb561e585 100644 --- a/lib/commit.js +++ b/lib/commit.js @@ -1,9 +1,11 @@ var events = require("events"); +var fp = require("lodash/fp"); var NodeGit = require("../"); var Commit = NodeGit.Commit; var LookupWrapper = NodeGit.Utils.lookupWrapper; var _amend = Commit.prototype.amend; +var _parent = Commit.prototype.parent; /** * Retrieves the commit pointed to by the oid @@ -14,6 +16,19 @@ var _amend = Commit.prototype.amend; */ Commit.lookup = LookupWrapper(Commit); +/** + * @async + * @param {Number} n + * @return {Commit} + */ +Commit.prototype.parent = function(n) { + var repo = this.repo; + return _parent.call(this, n).then(p => { + p.repo = repo; + return p; + }); +}; + /** * Amend a commit * @async @@ -23,10 +38,10 @@ Commit.lookup = LookupWrapper(Commit); * @param {String} message_encoding * @param {String} message * @param {Tree|Oid} tree - * @param {Oid} callback + * @return {Oid} */ Commit.prototype.amend = function ( - updateRef, author, committer, message_encoding, message, tree, callback) { + updateRef, author, committer, message_encoding, message, tree) { var repo = this.repo; var _this = this; var treePromise; @@ -50,6 +65,167 @@ Commit.prototype.amend = function ( }); }; +/** + * Amend a commit with the given signature + * @async + * @param {String} updateRef + * @param {Signature} author + * @param {Signature} 
committer + * @param {String} messageEncoding + * @param {String} message + * @param {Tree|Oid} tree + * @param {Function} onSignature Callback to be called with string to be signed + * @return {Oid} +*/ +Commit.prototype.amendWithSignature = function( + updateRef, + author, + committer, + messageEncoding, + message, + tree, + onSignature +) { + let repo = this.repo; + let parentOids = this.parents(); + let _this = this; + let promises = []; + + if (tree instanceof NodeGit.Oid) { + promises.push(repo.getTree(tree)); + } else { + promises.push(Promise.resolve(tree)); + } + + parentOids.forEach(function (parentOid) { + promises.push(repo.getCommit(parentOid)); + }); + + let treeObject; + let parents; + let commitContent; + let commit; + let skippedSigning; + let resolvedAuthor; + let resolvedCommitter; + let resolvedMessageEncoding; + let resolvedMessage; + let resolvedTree; + + let createCommitPromise = Promise.all(promises) + .then(function(results) { + treeObject = fp.head(results); + parents = fp.tail(results); + return _this.getTree(); + }) + .then(function(commitTreeResult) { + let commitTree = commitTreeResult; + + let truthyArgs = fp.omitBy( + fp.isNil, + { + author, + committer, + messageEncoding, + message, + tree: treeObject + } + ); + + let commitFields = { + author: _this.author(), + committer: _this.committer(), + messageEncoding: _this.messageEncoding(), + message: _this.message(), + tree: commitTree + }; + + ({ + author: resolvedAuthor, + committer: resolvedCommitter, + messageEncoding: resolvedMessageEncoding, + message: resolvedMessage, + tree: resolvedTree + } = fp.assign( + commitFields, + truthyArgs + )); + + return Commit.createBuffer( + repo, + resolvedAuthor, + resolvedCommitter, + resolvedMessageEncoding, + resolvedMessage, + resolvedTree, + parents.length, + parents + ); + }) + .then(function(commitContentResult) { + commitContent = commitContentResult; + if (!commitContent.endsWith("\n")) { + commitContent += "\n"; + } + return 
onSignature(commitContent); + }) + .then(function({ code, field, signedData }) { + switch (code) { + case NodeGit.Error.CODE.OK: + return Commit.createWithSignature( + repo, + commitContent, + signedData, + field + ); + case NodeGit.Error.CODE.PASSTHROUGH: + skippedSigning = true; + return Commit.create( + repo, + updateRef, + resolvedAuthor, + resolvedCommitter, + resolvedMessageEncoding, + resolvedMessage, + resolvedTree, + parents.length, + parents + ); + default: { + const error = new Error( + `Commit.amendWithSignature threw with error code ${code}` + ); + error.errno = code; + throw error; + } + } + }); + + if (!updateRef) { + return createCommitPromise; + } + + return createCommitPromise + .then(function(commitOid) { + if (skippedSigning) { + return commitOid; + } + + return repo.getCommit(commitOid) + .then(function(commitResult) { + commit = commitResult; + return repo.getReference(updateRef); + }).then(function(ref) { + return ref.setTarget( + commitOid, + `commit (amend): ${commit.summary()}` + ); + }).then(function() { + return commitOid; + }); + }); +}; + /** * Retrieve the commit time as a Date object. * @return {Date} @@ -63,11 +239,10 @@ Commit.prototype.date = function() { * and its parent(s). 
* * @async - * @param {Function} callback * @return {Array} an array of diffs */ -Commit.prototype.getDiff = function(callback) { - return this.getDiffWithOptions(null, callback); +Commit.prototype.getDiff = function() { + return this.getDiffWithOptions(null); }; /** @@ -76,10 +251,9 @@ Commit.prototype.getDiff = function(callback) { * * @async * @param {Object} options - * @param {Function} callback * @return {Array} an array of diffs */ -Commit.prototype.getDiffWithOptions = function(options, callback) { +Commit.prototype.getDiffWithOptions = function(options) { var commit = this; return commit.getTree().then(function(thisTree) { @@ -97,13 +271,7 @@ Commit.prototype.getDiffWithOptions = function(options, callback) { return Promise.all(diffs); }); - }).then(function(diffs) { - if (typeof callback === "function") { - callback(null, diffs); - } - - return diffs; - }, callback); + }); }; /** @@ -114,16 +282,10 @@ Commit.prototype.getDiffWithOptions = function(options, callback) { * @param {String} path * @return {TreeEntry} */ -Commit.prototype.getEntry = function(path, callback) { +Commit.prototype.getEntry = function(path) { return this.getTree().then(function(tree) { - return tree.getEntry(path).then(function(entry) { - if (typeof callback === "function") { - callback(null, entry); - } - - return entry; - }); - }, callback); + return tree.getEntry(path); + }); }; /** @@ -131,17 +293,11 @@ Commit.prototype.getEntry = function(path, callback) { * * @async * @param {number} limit Optional amount of parents to return. - * @param {Function} callback * @return {Array} array of commits */ -Commit.prototype.getParents = function(limit, callback) { +Commit.prototype.getParents = function(limit) { var parents = []; - // Shift arguments. - if (typeof limit === "function") { - callback = limit; - } - // If no limit was set, default to the maximum parents. limit = typeof limit === "number" ? 
limit : this.parentcount(); limit = Math.min(limit, this.parentcount()); @@ -154,13 +310,7 @@ Commit.prototype.getParents = function(limit, callback) { } // Wait for all parents to complete, before returning. - return Promise.all(parents).then(function(parents) { - if (typeof callback === "function") { - callback(null, parents); - } - - return parents; - }, callback); + return Promise.all(parents); }; /** @@ -186,8 +336,8 @@ Commit.prototype.getSignature = function(field) { * @async * @return {Tree} */ -Commit.prototype.getTree = function(callback) { - return this.repo.getTree(this.treeId(), callback); +Commit.prototype.getTree = function() { + return this.repo.getTree(this.treeId()); }; /** @@ -232,6 +382,21 @@ Commit.prototype.history = function() { return event; }; +/** + * Get the specified parent of the commit. + * + * @param {number} the position of the parent, starting from 0 + * @async + * @return {Commit} the parent commit at the specified position + */ +Commit.prototype.parent = function (id) { + var repository = this.repo; + return _parent.call(this, id).then(function(parent) { + parent.repo = repository; + return parent; + }); +}; + /** * Retrieve the commit's parent shas. * diff --git a/lib/config.js b/lib/config.js index 1527ede7be..37b792da85 100644 --- a/lib/config.js +++ b/lib/config.js @@ -1,8 +1,48 @@ +var util = require("util"); var NodeGit = require("../"); var Config = NodeGit.Config; +var _getBool = Config.prototype.getBool; +var _setBool = Config.prototype.setBool; + +/** + * @async + * @param {String} name The variable's name + * @return {Boolean} The variable's value + */ +Config.prototype.getBool = function(name) { + return _getBool.call(this, name) + .then(result => Boolean(result)); +}; + +/** + * @async + * @param {String} name The variable's name + * @param {Boolean} name The variable's value + * @return {Number} 0 or an error code + */ +Config.prototype.setBool = function(name, value) { + return _setBool.call(this, name, value ? 
1 : 0); +}; + // Backwards compatibility. Config.prototype.getString = function() { return this.getStringBuf.apply(this, arguments); }; + +NodeGit.Enums.CVAR = {}; +var DEPRECATED_CVAR_ENUMS = [ + "FALSE", + "TRUE", + "INT32", + "STRING" +]; +DEPRECATED_CVAR_ENUMS.forEach((key) => { + Object.defineProperty(NodeGit.Enums.CVAR, key, { + get: util.deprecate( + () => Config.MAP[key], + `Use NodeGit.Config.MAP.${key} instead of NodeGit.Enums.CVAR.${key}.` + ) + }); +}); diff --git a/lib/credential.js b/lib/credential.js new file mode 100644 index 0000000000..af1a59125e --- /dev/null +++ b/lib/credential.js @@ -0,0 +1,34 @@ +var util = require("util"); +var NodeGit = require("../"); + +var Credential = NodeGit.Credential; + +var deprecatedFn = (method) => + util.deprecate( + Credential[method].bind(Credential), + `Use NodeGit.Credential.${method} instead of NodeGit.Cred.${method}` + ); + +var createCredTypeDeprecationMessage = type => + `Use NodeGit.Credential.TYPE.${type} instead of NodeGit.Cred.TYPE.${type}`; + +NodeGit.Cred = { + defaultNew: deprecatedFn("defaultNew"), + sshKeyFromAgent: deprecatedFn("sshKeyFromAgent"), + sshKeyNew: deprecatedFn("sshKeyNew"), + sshKeyMemoryNew: deprecatedFn("sshKeyMemoryNew"), + usernameNew: deprecatedFn("usernameNew"), + userpassPlaintextNew: deprecatedFn("userpassPlaintextNew"), + TYPE: Object.keys(Credential.TYPE).reduce( + (type, key) => { + Object.defineProperty(type, key, { + get: util.deprecate( + () => Credential.TYPE[key], + createCredTypeDeprecationMessage(type) + ) + }); + return type; + }, + {} + ) +}; diff --git a/lib/deprecated/structs/ApplyOptions.js b/lib/deprecated/structs/ApplyOptions.js new file mode 100644 index 0000000000..cd9f22ca07 --- /dev/null +++ b/lib/deprecated/structs/ApplyOptions.js @@ -0,0 +1,3 @@ +module.exports = function() { + this.flags = 0; +}; diff --git a/lib/deprecated/structs/BlameOptions.js b/lib/deprecated/structs/BlameOptions.js new file mode 100644 index 0000000000..8ffd415de9 --- /dev/null 
+++ b/lib/deprecated/structs/BlameOptions.js @@ -0,0 +1,6 @@ +module.exports = function() { + this.flags = 0; + this.minMatchCharacters = 0; + this.minLine = 0; + this.maxLine = 0; +}; diff --git a/lib/deprecated/structs/BlobFilterOptions.js b/lib/deprecated/structs/BlobFilterOptions.js new file mode 100644 index 0000000000..df9a3d762b --- /dev/null +++ b/lib/deprecated/structs/BlobFilterOptions.js @@ -0,0 +1,3 @@ +module.exports = function() { + this.flags = 1; +}; diff --git a/lib/deprecated/structs/CheckoutOptions.js b/lib/deprecated/structs/CheckoutOptions.js new file mode 100644 index 0000000000..89e28d0939 --- /dev/null +++ b/lib/deprecated/structs/CheckoutOptions.js @@ -0,0 +1,8 @@ +module.exports = function() { + this.checkoutStrategy = 1; + this.disableFilters = 0; + this.dirMode = 0; + this.fileMode = 0; + this.fileOpenFlags = 0; + this.notifyFlags = 0; +}; diff --git a/lib/deprecated/structs/CherrypickOptions.js b/lib/deprecated/structs/CherrypickOptions.js new file mode 100644 index 0000000000..fce5efa102 --- /dev/null +++ b/lib/deprecated/structs/CherrypickOptions.js @@ -0,0 +1,5 @@ +module.exports = function(NodeGit) { + this.checkoutOpts = new NodeGit.CheckoutOptions(); + this.mainline = 0; + this.mergeOpts = new NodeGit.MergeOptions(); +}; diff --git a/lib/deprecated/structs/CloneOptions.js b/lib/deprecated/structs/CloneOptions.js new file mode 100644 index 0000000000..84deb60da8 --- /dev/null +++ b/lib/deprecated/structs/CloneOptions.js @@ -0,0 +1,6 @@ +module.exports = function(NodeGit) { + this.bare = 0; + this.checkoutOpts = new NodeGit.CheckoutOptions(); + this.fetchOpts = new NodeGit.FetchOptions(); + this.local = 0; +}; diff --git a/lib/deprecated/structs/DescribeFormatOptions.js b/lib/deprecated/structs/DescribeFormatOptions.js new file mode 100644 index 0000000000..6e9d25031a --- /dev/null +++ b/lib/deprecated/structs/DescribeFormatOptions.js @@ -0,0 +1,4 @@ +module.exports = function() { + this.abbreviatedSize = 7; + 
this.alwaysUseLongFormat = 0; +}; diff --git a/lib/deprecated/structs/DescribeOptions.js b/lib/deprecated/structs/DescribeOptions.js new file mode 100644 index 0000000000..a4c29a2a7b --- /dev/null +++ b/lib/deprecated/structs/DescribeOptions.js @@ -0,0 +1,6 @@ +module.exports = function() { + this.describeStrategy = 0; + this.maxCandidatesTags = 10; + this.onlyFollowFirstParent = 0; + this.showCommitOidAsFallback = 0; +}; diff --git a/lib/deprecated/structs/DiffFindOptions.js b/lib/deprecated/structs/DiffFindOptions.js new file mode 100644 index 0000000000..459bc0ac16 --- /dev/null +++ b/lib/deprecated/structs/DiffFindOptions.js @@ -0,0 +1,8 @@ +module.exports = function() { + this.breakRewriteThreshold = 0; + this.copyThreshold = 0; + this.flags = 0; + this.renameFromRewriteThreshold = 0; + this.renameLimit = 0; + this.renameThreshold = 0; +}; diff --git a/lib/deprecated/structs/DiffOptions.js b/lib/deprecated/structs/DiffOptions.js new file mode 100644 index 0000000000..eeb6c5c3a1 --- /dev/null +++ b/lib/deprecated/structs/DiffOptions.js @@ -0,0 +1,8 @@ +module.exports = function() { + this.contextLines = 3; + this.flags = 0; + this.idAbbrev = 0; + this.ignoreSubmodules = -1; + this.interhunkLines = 0; + this.maxSize = 0; +}; diff --git a/lib/deprecated/structs/FetchOptions.js b/lib/deprecated/structs/FetchOptions.js new file mode 100644 index 0000000000..cd3f890dc2 --- /dev/null +++ b/lib/deprecated/structs/FetchOptions.js @@ -0,0 +1,7 @@ +module.exports = function(NodeGit) { + this.callbacks = new NodeGit.RemoteCallbacks(); + this.downloadTags = 0; + this.proxyOpts = new NodeGit.ProxyOptions(); + this.prune = 0; + this.updateFetchhead = 1; +}; diff --git a/lib/deprecated/structs/MergeFileInput.js b/lib/deprecated/structs/MergeFileInput.js new file mode 100644 index 0000000000..0b77f13830 --- /dev/null +++ b/lib/deprecated/structs/MergeFileInput.js @@ -0,0 +1,4 @@ +module.exports = function() { + this.mode = 0; + this.size = 0; +}; diff --git 
a/lib/deprecated/structs/MergeFileOptions.js b/lib/deprecated/structs/MergeFileOptions.js new file mode 100644 index 0000000000..fd7101514d --- /dev/null +++ b/lib/deprecated/structs/MergeFileOptions.js @@ -0,0 +1,5 @@ +module.exports = function() { + this.favor = 0; + this.flags = 0; + this.markerSize = 0; +}; diff --git a/lib/deprecated/structs/MergeOptions.js b/lib/deprecated/structs/MergeOptions.js new file mode 100644 index 0000000000..3c73243a27 --- /dev/null +++ b/lib/deprecated/structs/MergeOptions.js @@ -0,0 +1,8 @@ +module.exports = function() { + this.fileFavor = 0; + this.fileFlags = 0; + this.flags = 1; + this.recursionLimit = 0; + this.renameThreshold = 0; + this.targetLimit = 0; +}; diff --git a/lib/deprecated/structs/ProxyOptions.js b/lib/deprecated/structs/ProxyOptions.js new file mode 100644 index 0000000000..d1b3260bae --- /dev/null +++ b/lib/deprecated/structs/ProxyOptions.js @@ -0,0 +1,3 @@ +module.exports = function() { + this.type = 0; +}; diff --git a/lib/deprecated/structs/PushOptions.js b/lib/deprecated/structs/PushOptions.js new file mode 100644 index 0000000000..bab5bc14f9 --- /dev/null +++ b/lib/deprecated/structs/PushOptions.js @@ -0,0 +1,5 @@ +module.exports = function(NodeGit) { + this.callbacks = new NodeGit.RemoteCallbacks(); + this.pbParallelism = 1; + this.proxyOpts = new NodeGit.ProxyOptions(); +}; diff --git a/lib/deprecated/structs/RebaseOptions.js b/lib/deprecated/structs/RebaseOptions.js new file mode 100644 index 0000000000..414b296b5a --- /dev/null +++ b/lib/deprecated/structs/RebaseOptions.js @@ -0,0 +1,6 @@ +module.exports = function(NodeGit) { + this.checkoutOptions = new NodeGit.CheckoutOptions(); + this.inmemory = 0; + this.mergeOptions = new NodeGit.MergeOptions(); + this.quiet = 0; +}; diff --git a/lib/deprecated/structs/RemoteCreateOptions.js b/lib/deprecated/structs/RemoteCreateOptions.js new file mode 100644 index 0000000000..cd9f22ca07 --- /dev/null +++ b/lib/deprecated/structs/RemoteCreateOptions.js @@ -0,0 
+1,3 @@ +module.exports = function() { + this.flags = 0; +}; diff --git a/lib/deprecated/structs/RepositoryInitOptions.js b/lib/deprecated/structs/RepositoryInitOptions.js new file mode 100644 index 0000000000..39657b17ea --- /dev/null +++ b/lib/deprecated/structs/RepositoryInitOptions.js @@ -0,0 +1,4 @@ +module.exports = function() { + this.flags = 0; + this.mode = 0; +}; diff --git a/lib/deprecated/structs/RevertOptions.js b/lib/deprecated/structs/RevertOptions.js new file mode 100644 index 0000000000..fce5efa102 --- /dev/null +++ b/lib/deprecated/structs/RevertOptions.js @@ -0,0 +1,5 @@ +module.exports = function(NodeGit) { + this.checkoutOpts = new NodeGit.CheckoutOptions(); + this.mainline = 0; + this.mergeOpts = new NodeGit.MergeOptions(); +}; diff --git a/lib/deprecated/structs/StashApplyOptions.js b/lib/deprecated/structs/StashApplyOptions.js new file mode 100644 index 0000000000..a7a9fcc839 --- /dev/null +++ b/lib/deprecated/structs/StashApplyOptions.js @@ -0,0 +1,4 @@ +module.exports = function(NodeGit) { + this.checkoutOptions = new NodeGit.CheckoutOptions(); + this.flags = 0; +}; diff --git a/lib/deprecated/structs/StatusOptions.js b/lib/deprecated/structs/StatusOptions.js new file mode 100644 index 0000000000..d7c35c0f4d --- /dev/null +++ b/lib/deprecated/structs/StatusOptions.js @@ -0,0 +1,4 @@ +module.exports = function() { + this.flags = 0; + this.show = 0; +}; diff --git a/lib/deprecated/structs/SubmoduleUpdateOptions.js b/lib/deprecated/structs/SubmoduleUpdateOptions.js new file mode 100644 index 0000000000..8dad87f119 --- /dev/null +++ b/lib/deprecated/structs/SubmoduleUpdateOptions.js @@ -0,0 +1,5 @@ +module.exports = function(NodeGit) { + this.allowFetch = 1; + this.checkoutOpts = new NodeGit.CheckoutOptions(); + this.fetchOpts = new NodeGit.FetchOptions(); +}; diff --git a/lib/diff.js b/lib/diff.js index b393595f8e..2ae41a62b6 100644 --- a/lib/diff.js +++ b/lib/diff.js @@ -1,16 +1,8 @@ var NodeGit = require("../"); var Diff = NodeGit.Diff; 
-var normalizeOptions = NodeGit.Utils.normalizeOptions; var Patch = NodeGit.Patch; var _blobToBuffer = Diff.blobToBuffer; -var _indexToWorkdir = Diff.indexToWorkdir; -var _treeToIndex = Diff.treeToIndex; -var _treeToTree = Diff.treeToTree; -var _treeToWorkdir = Diff.treeToWorkdir; -var _treeToWorkdirWithIndex = Diff.treeToWorkdirWithIndex; - -var _findSimilar = Diff.prototype.findSimilar; /** * Directly run a diff between a blob and a buffer. @@ -48,8 +40,6 @@ Diff.blobToBuffer= function( bufferLength = !buffer ? 0 : Buffer.byteLength(buffer, "utf8"); } - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _blobToBuffer.call( this, old_blob, @@ -65,42 +55,6 @@ Diff.blobToBuffer= function( null); }; -// Override Diff.indexToWorkdir to normalize opts -Diff.indexToWorkdir = function(repo, index, opts) { - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _indexToWorkdir(repo, index, opts); -}; - -// Override Diff.treeToIndex to normalize opts -Diff.treeToIndex = function(repo, tree, index, opts) { - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _treeToIndex(repo, tree, index, opts); -}; - -// Override Diff.treeToTree to normalize opts -Diff.treeToTree = function(repo, from_tree, to_tree, opts) { - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _treeToTree(repo, from_tree, to_tree, opts); -}; - -// Override Diff.treeToWorkdir to normalize opts -Diff.treeToWorkdir = function(repo, tree, opts) { - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _treeToWorkdir(repo, tree, opts); -}; - -// Override Diff.treeToWorkdir to normalize opts -Diff.treeToWorkdirWithIndex = function(repo, tree, opts) { - opts = normalizeOptions(opts, NodeGit.DiffOptions); - return _treeToWorkdirWithIndex(repo, tree, opts); -}; - -// Override Diff.findSimilar to normalize opts -Diff.prototype.findSimilar = function(opts) { - opts = normalizeOptions(opts, NodeGit.DiffFindOptions); - return _findSimilar.call(this, opts); -}; - /** * 
Retrieve patches in this difflist * @@ -108,6 +62,6 @@ Diff.prototype.findSimilar = function(opts) { * @return {Array} a promise that resolves to an array of * ConvenientPatches */ -Diff.prototype.patches = function() { - return Patch.convenientFromDiff(this); +Diff.prototype.patches = function(idxs) { + return Patch.convenientFromDiff(this, idxs); }; diff --git a/lib/diff_line.js b/lib/diff_line.js index 83e4344013..f856f07c7d 100644 --- a/lib/diff_line.js +++ b/lib/diff_line.js @@ -13,7 +13,7 @@ DiffLine.prototype.content = function() { } if (!this._cache.content) { - this._cache.content = new Buffer(this.rawContent()) + this._cache.content = Buffer.from(this.rawContent()) .slice(0, this.contentLen()) .toString("utf8"); } diff --git a/lib/error.js b/lib/error.js new file mode 100644 index 0000000000..819299681c --- /dev/null +++ b/lib/error.js @@ -0,0 +1,17 @@ +var util = require("util"); +var NodeGit = require("../"); + +// Deprecated ----------------------------------------------------------------- + +// In 0.28.0 git_error was majorly refactored to have better naming in libgit2 +// We will continue to support the old enum entries but with a deprecation +// warning as they will go away soon. 
+Object.keys(NodeGit.Error.CODE).forEach((key) => { + Object.defineProperty(NodeGit.Error.CODE, `GITERR_${key}`, { + get: util.deprecate( + () => NodeGit.Error.CODE[key], + `Use NodeGit.Error.CODE.${key} instead of ` + + `NodeGit.Error.CODE.GETERR_${key}.` + ) + }); +}); diff --git a/lib/filter_registry.js b/lib/filter_registry.js index 76dfba5733..cfa17415e0 100644 --- a/lib/filter_registry.js +++ b/lib/filter_registry.js @@ -1,42 +1,22 @@ var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; var FilterRegistry = NodeGit.FilterRegistry; var _register = FilterRegistry.register; -var _unregister = FilterRegistry.unregister; // register should add filter by name to dict and return // Override FilterRegistry.register to normalize Filter -FilterRegistry.register = function(name, filter, priority, callback) { +FilterRegistry.register = function(name, filter, priority) { // setting default value of attributes if (filter.attributes === undefined) { filter.attributes = ""; } - filter = normalizeOptions(filter, NodeGit.Filter); - if (!filter.check || !filter.apply) { - return callback(new Error( + return Promise.reject(new Error( "ERROR: please provide check and apply callbacks for filter" )); } - return _register(name, filter, priority) - .then(function(result) { - if (typeof callback === "function") { - callback(null, result); - } - return result; - }, callback); -}; - -FilterRegistry.unregister = function(name, callback) { - return _unregister(name) - .then(function(result) { - if (typeof callback === "function") { - callback(null, result); - } - return result; - }, callback); + return _register(name, filter, priority); }; diff --git a/lib/graph.js b/lib/graph.js new file mode 100644 index 0000000000..7371410076 --- /dev/null +++ b/lib/graph.js @@ -0,0 +1,15 @@ +var NodeGit = require("../"); + +var Graph = NodeGit.Graph; + +var _reachableFromAny = Graph.reachableFromAny; + +/** + * Determine if a commit is reachable from any of a list of 
commits by following parent edges. + * @param {repository} the repository where the commits exist + * @param {commit} a previously loaded commit + * @param {descendant_array} oids of the commits + */ +Graph.reachableFromAny = function(repository, commit, descendant_array) { + return _reachableFromAny(repository, commit, descendant_array, descendant_array.length); +}; diff --git a/lib/index.js b/lib/index.js index cb87784d3c..ad37cea782 100644 --- a/lib/index.js +++ b/lib/index.js @@ -1,3 +1,4 @@ +var util = require("util"); var NodeGit = require("../"); var Index = NodeGit.Index; @@ -32,3 +33,71 @@ Index.prototype.removeAll = function(pathspec, matchedCallback) { Index.prototype.updateAll = function(pathspec, matchedCallback) { return _updateAll.call(this, pathspec || "*", matchedCallback, null); }; + +// Deprecated ----------------------------------------------------------------- + +NodeGit.Index.CAP = {}; +Object.keys(NodeGit.Index.CAPABILITY).forEach((key) => { + Object.defineProperty(NodeGit.Index.CAP, key, { + get: util.deprecate( + () => NodeGit.Index.CAPABILITY[key], + `Use NodeGit.Index.CAPABILITY.${key} instead of ` + + `NodeGit.Index.CAP.${key}.` + ) + }); +}); + +NodeGit.Enums.INDXENTRY_FLAG = {}; +Object.defineProperty(NodeGit.Enums.INDXENTRY_FLAG, "IDXENTRY_EXTENDED", { + get: util.deprecate( + () => NodeGit.Index.ENTRY_FLAG.ENTRY_EXTENDED, + "Use NodeGit.Index.ENTRY_FLAG.ENTRY_EXTENDED instead of " + + "NodeGit.Enums.INDXENTRY_FLAG.IDXENTRY_EXTENDED." + ) +}); +Object.defineProperty(NodeGit.Enums.INDXENTRY_FLAG, "IDXENTRY_VALID", { + get: util.deprecate( + () => NodeGit.Index.ENTRY_FLAG.ENTRY_VALID, + "Use NodeGit.Index.ENTRY_FLAG.ENTRY_VALID instead of " + + "NodeGit.Enums.INDXENTRY_FLAG.IDXENTRY_VALID." 
+ ) +}); + +NodeGit.Enums.IDXENTRY_EXTENDED_FLAG = {}; +var EXTENDED_FLAGS_MAP = { + IDXENTRY_INTENT_TO_ADD: "ENTRY_INTENT_TO_ADD", + IDXENTRY_SKIP_WORKTREE: "ENTRY_SKIP_WORKTREE", + S: "S", + IDXENTRY_UPTODATE: "ENTRY_UPTODATE" +}; +Object.keys(EXTENDED_FLAGS_MAP).forEach((key) => { + const newKey = EXTENDED_FLAGS_MAP[key]; + Object.defineProperty(NodeGit.Enums.IDXENTRY_EXTENDED_FLAG, key, { + get: util.deprecate( + () => NodeGit.Index.ENTRY_EXTENDED_FLAG[newKey], + `Use NodeGit.Index.ENTRY_EXTENDED_FLAG.${newKey} instead of ` + + `NodeGit.Enums.IDXENTRY_EXTENDED_FLAG.${key}.` + ) + }); +}); + +var DEPRECATED_EXTENDED_FLAGS = { + IDXENTRY_EXTENDED2: 32768, + IDXENTRY_UPDATE: 1, + IDXENTRY_REMOVE: 2, + IDXENTRY_ADDED: 8, + IDXENTRY_HASHED: 16, + IDXENTRY_UNHASHED: 32, + IDXENTRY_WT_REMOVE: 64, + IDXENTRY_CONFLICTED: 128, + IDXENTRY_UNPACKED: 256, + IDXENTRY_NEW_SKIP_WORKTREE: 512, +}; +Object.keys(DEPRECATED_EXTENDED_FLAGS).forEach((key) => { + Object.defineProperty(NodeGit.Enums.IDXENTRY_EXTENDED_FLAG, key, { + get: util.deprecate( + () => DEPRECATED_EXTENDED_FLAGS[key], + "LibGit2 has removed this flag for public usage." 
+ ) + }); +}); diff --git a/lib/merge.js b/lib/merge.js index e9b9b5c18b..549bbad919 100644 --- a/lib/merge.js +++ b/lib/merge.js @@ -1,5 +1,4 @@ var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; var Merge = NodeGit.Merge; var _commits = Merge.commits; @@ -15,8 +14,6 @@ var _merge = Merge.merge; * @param {MergeOptions} [options] The merge tree options (null for default) */ Merge.commits = function(repo, ourCommit, theirCommit, options) { - options = normalizeOptions(options, NodeGit.MergeOptions); - return Promise.all([ repo.getCommit(ourCommit), repo.getCommit(theirCommit) @@ -35,9 +32,6 @@ Merge.commits = function(repo, ourCommit, theirCommit, options) { * (null for default) */ Merge.merge = function(repo, theirHead, mergeOpts, checkoutOpts) { - mergeOpts = normalizeOptions(mergeOpts || {}, NodeGit.MergeOptions); - checkoutOpts = normalizeOptions(checkoutOpts || {}, NodeGit.CheckoutOptions); - // Even though git_merge takes an array of annotated_commits, it expects // exactly one to have been passed in or it will throw an error... ¯\_(ツ)_/¯ var theirHeads = [theirHead]; diff --git a/lib/object.js b/lib/object.js index 85917987a3..680aebd121 100644 --- a/lib/object.js +++ b/lib/object.js @@ -1,3 +1,4 @@ +var util = require("util"); var NodeGit = require("../"); var Obj = NodeGit.Object; @@ -33,3 +34,12 @@ Obj.prototype.isTag = function() { Obj.prototype.isTree = function() { return this.type() == Obj.TYPE.TREE; }; + +// Deprecated ----------------------------------------------------------------- + +Object.defineProperty(Obj.TYPE, "BAD", { + get: util.deprecate( + () => Obj.TYPE.INVALID, + "Use NodeGit.Object.TYPE.INVALID instead of NodeGit.Object.TYPE.BAD." 
+ ) +}); diff --git a/lib/odb.js b/lib/odb.js deleted file mode 100644 index 8bbd15a623..0000000000 --- a/lib/odb.js +++ /dev/null @@ -1,15 +0,0 @@ -var NodeGit = require("../"); - -var Odb = NodeGit.Odb; - -var _read = Odb.prototype.read; - -Odb.prototype.read = function(oid, callback) { - return _read.call(this, oid).then(function(odbObject) { - if (typeof callback === "function") { - callback(null, odbObject); - } - - return odbObject; - }, callback); -}; diff --git a/lib/rebase.js b/lib/rebase.js index 28882ea6c5..821d470ddd 100644 --- a/lib/rebase.js +++ b/lib/rebase.js @@ -1,12 +1,52 @@ var NodeGit = require("../"); var Rebase = NodeGit.Rebase; -var normalizeOptions = NodeGit.Utils.normalizeOptions; -var shallowClone = NodeGit.Utils.shallowClone; var _init = Rebase.init; var _open = Rebase.open; -var _abort = Rebase.prototype.abort; -var _commit = Rebase.prototype.commit; + +function defaultRebaseOptions(repository, options, checkoutStrategy) { + if (options) { + // Ensure we don't modify the passed-in options object. + // This could lead to us recursing commitCreateCb if the same + // options object is later re-used. 
+ options = Object.assign({}, options); + + if (options.signingCb && !options.commitCreateCb) { + console.warn("signingCb is deperecated, use commitCreateCb instead."); + + let signingCb = options.signingCb; + options.commitCreateCb = function ( + author, + committer, + message_encoding, + message, + tree, + parent_count, + parents + ) { + return repository.createCommitWithSignature( + null, + author, + committer, + message, + tree, + parents, + signingCb).then((oid) => { + return oid; + }); + }; + } + } else if (checkoutStrategy) { + options = { + checkoutOptions: { + checkoutStrategy: checkoutStrategy + } + }; + } + + return options; +} + /** * Initializes a rebase * @async @@ -19,54 +59,14 @@ var _commit = Rebase.prototype.commit; * onto the given upstream * @param {RebaseOptions} options Options to specify how rebase is performed, * or NULL - * @param {Function} callback * @return {Remote} */ - -function defaultRebaseOptions(options, checkoutStrategy) { - var checkoutOptions; - var mergeOptions; - - if (options) { - options = shallowClone(options); - checkoutOptions = options.checkoutOptions; - mergeOptions = options.mergeOptions; - delete options.checkoutOptions; - delete options.mergeOptions; - - options = normalizeOptions(options, NodeGit.RebaseOptions); - } else { - options = normalizeOptions({}, NodeGit.RebaseOptions); - if (checkoutStrategy) { - checkoutOptions = { - checkoutStrategy: checkoutStrategy - }; - } - } - - if (checkoutOptions) { - options.checkoutOptions = normalizeOptions( - checkoutOptions, - NodeGit.CheckoutOptions - ); - } - - if (mergeOptions) { - options.mergeOptions = normalizeOptions( - mergeOptions, - NodeGit.MergeOptions - ); - } - - return options; -} - Rebase.init = function(repository, branch, upstream, onto, options) { - options = defaultRebaseOptions( + return _init(repository, branch, upstream, onto, defaultRebaseOptions( + repository, options, NodeGit.Checkout.STRATEGY.FORCE - ); - return _init(repository, branch, upstream, 
onto, options); + )); }; /** @@ -75,22 +75,12 @@ Rebase.init = function(repository, branch, upstream, onto, options) { * @async * @param {Repository} repo The repository that has a rebase in-progress * @param {RebaseOptions} options Options to specify how rebase is performed - * @param {Function} callback * @return {Remote} */ Rebase.open = function(repository, options) { - options = defaultRebaseOptions( + return _open(repository, defaultRebaseOptions( + repository, options, NodeGit.Checkout.STRATEGY.SAFE - ); - return _open(repository, options); -}; - -Rebase.prototype.commit = function(author, committer, encoding, message) { - return _commit.call(this, author, committer, encoding, message); -}; - -Rebase.prototype.abort = function() { - return _abort.call(this); + )); }; - diff --git a/lib/reference.js b/lib/reference.js index 821ace32f4..af3e006216 100644 --- a/lib/reference.js +++ b/lib/reference.js @@ -1,3 +1,4 @@ +var util = require("util"); var NodeGit = require("../"); var LookupWrapper = NodeGit.Utils.lookupWrapper; @@ -29,7 +30,7 @@ Reference.lookup = LookupWrapper(Reference); * @return {Boolean} */ Reference.prototype.isConcrete = function() { - return this.type() == Reference.TYPE.OID; + return this.type() == Reference.TYPE.DIRECT; }; /** @@ -63,3 +64,150 @@ Reference.prototype.isValid = function() { Reference.prototype.toString = function() { return this.name(); }; + +const getTerminal = (repo, refName, depth = 10, prevRef = null) => { + if (depth <= 0) { + return Promise.resolve({ + error: NodeGit.Error.CODE.ENOTFOUND, + out: prevRef + }); + } + + return NodeGit.Reference.lookup(repo, refName) + .then((ref) => { + if (ref.type() === NodeGit.Reference.TYPE.DIRECT) { + return { + error: NodeGit.Error.CODE.OK, + out: ref + }; + } else { + return getTerminal(repo, ref.symbolicTarget(), depth - 1, ref) + .then(({ error, out }) => { + if (error === NodeGit.Error.CODE.ENOTFOUND && !out) { + return { error, out: ref }; + } else { + return { error, out }; + } 
+ }); + } + }) + .catch((error) => { + return { + error: error.errno, + out: null + }; + }); +}; + +const getSignatureForReflog = (repo) => { + const { email, name } = repo.ident(); + if (email && name) { + return Promise.resolve(NodeGit.Signature.now(name, email)); + } + + return NodeGit.Signature.default(repo) + .catch(() => NodeGit.Signature.now("unknown", "unknown")); +}; + +/** + * Given a reference name, follows symbolic links and updates the direct + * reference to point to a given OID. Updates the reflog with a given message. + * + * @async + * @param {Repository} repo The repo where the reference and objects live + * @param {String} refName The reference name to update + * @param {Oid} oid The target OID that the reference will point to + * @param {String} logMessage The reflog message to be writted + * @param {Signature} signature Optional signature to use for the reflog entry + */ +Reference.updateTerminal = function ( + repo, + refName, + oid, + logMessage, + signature +) { + let signatureToUse; + let promiseChain = Promise.resolve(); + + if (!signature) { + promiseChain = promiseChain + .then(() => getSignatureForReflog(repo)) + .then((sig) => { + signatureToUse = sig; + return Promise.resolve(); + }); + } else { + signatureToUse = signature; + } + + return promiseChain + .then(() => getTerminal(repo, refName)) + .then(({ error, out }) => { + if (error === NodeGit.Error.CODE.ENOTFOUND && out) { + return NodeGit.Reference.create( + repo, + out.symbolicTarget(), + oid, + 0, + logMessage + ); + } else if (error === NodeGit.Error.CODE.ENOTFOUND) { + return NodeGit.Reference.create( + repo, + refName, + oid, + 0, + logMessage + ); + } else { + return NodeGit.Reference.createMatching( + repo, + out.name(), + oid, + 1, + out.target(), + logMessage + ); + } + }) + .then(() => NodeGit.Reflog.read(repo, refName)) + .then((reflog) => { + // Janky, but works. 
Ideally, we would want to generate the correct reflog + // entry in the first place, rather than drop the most recent entry and + // write the correct one. + // NOTE: There is a theoretical race condition that could happen here. + // We may want to consider some kind of transactional logic to make sure + // that the reflog on disk isn't modified before we can write back. + reflog.drop(0, 1); + reflog.append(oid, signatureToUse, logMessage); + return reflog.write(); + }); +}; + +// Deprecated ----------------------------------------------------------------- + +Object.defineProperty(NodeGit.Reference.TYPE, "OID", { + get: util.deprecate( + () => NodeGit.Reference.TYPE.DIRECT, + "Use NodeGit.Reference.TYPE.DIRECT instead of NodeGit.Reference.TYPE.OID." + ) +}); + +Object.defineProperty(NodeGit.Reference.TYPE, "LISTALL", { + get: util.deprecate( + () => NodeGit.Reference.TYPE.ALL, + "Use NodeGit.Reference.TYPE.ALL instead of NodeGit.Reference.TYPE.LISTALL." + ) +}); + +NodeGit.Reference.NORMALIZE = {}; +Object.keys(NodeGit.Reference.FORMAT).forEach((key) => { + Object.defineProperty(NodeGit.Reference.NORMALIZE, `REF_FORMAT_${key}`, { + get: util.deprecate( + () => NodeGit.Reference.FORMAT[key], + `Use NodeGit.Reference.FORMAT.${key} instead of ` + + `NodeGit.Reference.NORMALIZE.REF_FORMAT_${key}.` + ) + }); +}); diff --git a/lib/remote.js b/lib/remote.js index b7c8979577..c0483ca87a 100644 --- a/lib/remote.js +++ b/lib/remote.js @@ -1,15 +1,8 @@ +var util = require("util"); var NodeGit = require("../"); -var normalizeFetchOptions = NodeGit.Utils.normalizeFetchOptions; -var normalizeOptions = NodeGit.Utils.normalizeOptions; var lookupWrapper = NodeGit.Utils.lookupWrapper; -var shallowClone = NodeGit.Utils.shallowClone; var Remote = NodeGit.Remote; -var _connect = Remote.prototype.connect; -var _download = Remote.prototype.download; -var _fetch = Remote.prototype.fetch; -var _push = Remote.prototype.push; -var _upload = Remote.prototype.upload; /** * Retrieves the remote 
by name @@ -21,97 +14,6 @@ var _upload = Remote.prototype.upload; */ Remote.lookup = lookupWrapper(Remote); -/** - * Connects to a remote - * - * @async - * @param {Enums.DIRECTION} direction The direction for the connection - * @param {RemoteCallbacks} callbacks The callback functions for the connection - * @param {ProxyOptions} proxyOpts Proxy settings - * @param {Array} customHeaders extra HTTP headers to use - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.connect = function( - direction, - callbacks, - proxyOpts, - customHeaders -) { - callbacks = normalizeOptions(callbacks || {}, NodeGit.RemoteCallbacks); - proxyOpts = normalizeOptions(proxyOpts || {}, NodeGit.ProxyOptions); - customHeaders = customHeaders || []; - - return _connect.call(this, direction, callbacks, proxyOpts, customHeaders); -}; - -/** - * Connects to a remote - * - * @async - * @param {Array} refSpecs The ref specs that should be pushed - * @param {FetchOptions} opts The fetch options for download, contains callbacks - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.download = function(refspecs, opts) { - return _download - .call(this, refspecs, normalizeFetchOptions(opts)); -}; - -/** - * Connects to a remote - * - * @async - * @param {Array} refSpecs The ref specs that should be pushed - * @param {FetchOptions} opts The fetch options for download, contains callbacks - * @param {String} message The message to use for the update reflog messages - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.fetch = function(refspecs, opts, reflog_message) { - return _fetch - .call(this, refspecs, normalizeFetchOptions(opts), reflog_message); -}; - -/** - * Pushes to a remote - * - * @async - * @param {Array} refSpecs The ref specs that should be pushed - * @param {PushOptions} options Options for the checkout - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.push 
= function(refSpecs, opts) { - var callbacks; - var proxyOpts; - - if (opts) { - opts = shallowClone(opts); - callbacks = opts.callbacks; - proxyOpts = opts.proxyOpts; - delete opts.callbacks; - delete opts.proxyOpts; - } else { - opts = {}; - } - - opts = normalizeOptions(opts, NodeGit.PushOptions); - - if (callbacks) { - opts.callbacks = - normalizeOptions(callbacks, NodeGit.RemoteCallbacks); - } - - if (proxyOpts) { - opts.proxyOpts = - normalizeOptions(proxyOpts, NodeGit.ProxyOptions); - } - - return _push.call(this, refSpecs, opts); -}; - /** * Lists advertised references from a remote. You must connect to the remote * before using referenceList. @@ -124,55 +26,20 @@ Remote.prototype.push = function(refSpecs, opts) { */ Remote.prototype.referenceList = Remote.prototype.referenceList; -/** - * Connects to a remote - * - * @async - * @param {Array} refSpecs The ref specs that should be pushed - * @param {FetchOptions} opts The fetch options for download, contains callbacks - * @param {String} message The message to use for the update reflog messages - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.fetch = function(refspecs, opts, reflog_message) { - return _fetch - .call(this, refspecs, normalizeFetchOptions(opts), reflog_message); +NodeGit.Remote.COMPLETION_TYPE = {}; +var DEPRECATED_STATES = { + COMPLETION_DOWNLOAD: "DOWNLOAD", + COMPLETION_INDEXING: "INDEXING", + COMPLETION_ERROR: "ERROR" }; -/** - * Pushes to a remote - * - * @async - * @param {Array} refSpecs The ref specs that should be pushed - * @param {PushOptions} options Options for the checkout - * @param {Function} callback - * @return {Number} error code - */ -Remote.prototype.upload = function(refSpecs, opts) { - var callbacks; - var proxyOpts; - - if (opts) { - opts = shallowClone(opts); - callbacks = opts.callbacks; - proxyOpts = opts.proxyOpts; - delete opts.callbacks; - delete opts.proxyOpts; - } else { - opts = {}; - } - - opts = normalizeOptions(opts, 
NodeGit.PushOptions); - - if (callbacks) { - opts.callbacks = - normalizeOptions(callbacks, NodeGit.RemoteCallbacks); - } - - if (proxyOpts) { - opts.proxyOpts = - normalizeOptions(proxyOpts, NodeGit.ProxyOptions); - } - - return _upload.call(this, refSpecs, opts); -}; +Object.keys(DEPRECATED_STATES).forEach((key) => { + const newKey = DEPRECATED_STATES[key]; + Object.defineProperty(NodeGit.Remote.COMPLETION_TYPE, key, { + get: util.deprecate( + () => NodeGit.Remote.COMPLETION[newKey], + `Use NodeGit.Remote.COMPLETION.${newKey} instead of ` + + `NodeGit.Remote.COMPLETION_TYPE.${key}.` + ) + }); +}); diff --git a/lib/repository.js b/lib/repository.js index d5e9411340..64d566dcdc 100644 --- a/lib/repository.js +++ b/lib/repository.js @@ -4,7 +4,6 @@ var NodeGit = require("../"); var Blob = NodeGit.Blob; var Checkout = NodeGit.Checkout; var Commit = NodeGit.Commit; -var normalizeOptions = NodeGit.Utils.normalizeOptions; var shallowClone = NodeGit.Utils.shallowClone; var path = require("path"); var Filter = NodeGit.Filter; @@ -22,7 +21,6 @@ var Tree = NodeGit.Tree; var TreeBuilder = NodeGit.Treebuilder; var _discover = Repository.discover; -var _initExt = Repository.initExt; var _fetchheadForeach = Repository.prototype.fetchheadForeach; var _mergeheadForeach = Repository.prototype.mergeheadForeach; @@ -174,6 +172,22 @@ function getPathHunks(repo, index, filePath, isStaged, additionalDiffOptions) { }); } +function getReflogMessageForCommit(commit) { + var parentCount = commit.parentcount(); + var summary = commit.summary(); + var commitType; + + if (parentCount >= 2) { + commitType = " (merge)"; + } else if (parentCount == 0) { + commitType = " (initial)"; + } else { + commitType = ""; + } + + return `commit${commitType}: ${summary}`; +} + /** * Goes through a rebase's rebase operations and commits them if there are * no merge conflicts @@ -303,7 +317,7 @@ function performRebase( } /** - * Creates a branch with the passed in name pointing to the commit + * Look for a 
git repository, returning its path. * * @async * @param {String} startPath The base path where the lookup starts. @@ -315,62 +329,27 @@ function performRebase( are hit. This may be set to null * @return {String} Path of the git repository */ -Repository.discover = function(startPath, acrossFs, ceilingDirs, callback) { +Repository.discover = function(startPath, acrossFs, ceilingDirs) { return _discover(startPath, acrossFs, ceilingDirs) .then(function(foundPath) { - foundPath = path.resolve(foundPath); - if (typeof callback === "function") { - callback(null, foundPath); - } - return foundPath; - }, callback); -}; - -// Override Repository.initExt to normalize initoptions -Repository.initExt = function(repo_path, opts) { - opts = normalizeOptions(opts, NodeGit.RepositoryInitOptions); - return _initExt(repo_path, opts); + return path.resolve(foundPath); + }); }; -Repository.getReferences = function(repo, type, refNamesOnly, callback) { - return Reference.list(repo).then(function(refList) { - var refFilterPromises = []; - var filteredRefs = []; - - refList.forEach(function(refName) { - refFilterPromises.push(Reference.lookup(repo, refName) - .then(function(ref) { - if (type == Reference.TYPE.LISTALL || ref.type() == type) { - if (refNamesOnly) { - filteredRefs.push(refName); - return; - } - - if (ref.isSymbolic()) { - return ref.resolve().then(function(resolvedRef) { - resolvedRef.repo = repo; - - filteredRefs.push(resolvedRef); - }) - .catch(function() { - // If we can't resolve the ref then just ignore it. 
- }); - } - else { - filteredRefs.push(ref); - } - } - }) - ); +Repository.getReferences = function(repo, type, refNamesOnly) { + return repo.getReferences().then(function(refList) { + var filteredRefList = refList.filter(function(reference) { + return type === Reference.TYPE.ALL || reference.type( ) === type; }); - return Promise.all(refFilterPromises).then(function() { - if (typeof callback === "function") { - callback(null, filteredRefs); - } - return filteredRefs; - }, callback); + if (refNamesOnly) { + return filteredRefList.map(function(reference) { + return reference.name(); + }); + } + + return filteredRefList; }); }; @@ -439,38 +418,56 @@ Repository.prototype.checkoutRef = function(reference, opts) { * promise, finish() will be called when the * promise resolves. This callback will be * provided a detailed overview of the rebase + * @param {RebaseOptions} rebaseOptions Options to initialize the rebase object + * with * @return {Oid|Index} A commit id for a succesful merge or an index for a * rebase with conflicts */ Repository.prototype.continueRebase = function( signature, beforeNextFn, - beforeFinishFn + beforeFinishFn, + rebaseOptions ) { - var repo = this; + const repo = this; - signature = signature || repo.defaultSignature(); + let rebase; + let promiseChain = Promise.resolve(); - var rebase; - return repo.refreshIndex() - .then(function(index) { + if (!signature) { + promiseChain = promiseChain + .then(() => repo.defaultSignature()) + .then((signatureResult) => { + signature = signatureResult; + }); + } + + return promiseChain + .then(() => repo.refreshIndex()) + .then((index) => { if (index.hasConflicts()) { throw index; } - return NodeGit.Rebase.open(repo); + return NodeGit.Rebase.open(repo, rebaseOptions); }) - .then(function(_rebase) { + .then((_rebase) => { rebase = _rebase; return rebase.commit(null, signature) - .catch(function() { - // Ignore all errors to prevent - // this routine from choking now - // that we made rebase.commit - // 
asynchronous + .catch((e) => { + // If the first commit on continueRebase is a + // "patch already applied" error, + // interpret that as an explicit "skip commit" + // and ignore the error. + const errno = fp.get(["errno"], e); + if (errno === NodeGit.Error.CODE.EAPPLIED) { + return; + } + + throw e; }); }) - .then(function() { + .then(() => { return performRebase( repo, rebase, @@ -479,7 +476,7 @@ Repository.prototype.continueRebase = function( beforeFinishFn ); }) - .then(function(error) { + .then((error) => { if (error) { throw error; } @@ -521,6 +518,7 @@ Repository.prototype.createBranch = function(name, commit, force) { /** * Create a blob from a buffer * + * @async * @param {Buffer} buffer * @return {Oid} */ @@ -541,7 +539,7 @@ Repository.prototype.createBlobFromBuffer = function(buffer) { * @return {Oid} The oid of the commit */ Repository.prototype.createCommit = function( - updateRef, author, committer, message, tree, parents, callback) { + updateRef, author, committer, message, tree, parents) { var repo = this; var promises = []; @@ -576,13 +574,7 @@ Repository.prototype.createCommit = function( parents.length, parents ); - }).then(function(commit) { - if (typeof callback === "function") { - callback(null, commit); - } - - return commit; - }, callback); + }); }; /** @@ -622,6 +614,128 @@ Repository.prototype.createCommitBuffer = function( }); }; +/** + * Create a commit that is digitally signed + * + * @async + * @param {String} updateRef + * @param {Signature} author + * @param {Signature} committer + * @param {String} message + * @param {Tree|Oid|String} Tree + * @param {Array} parents + * @param {Function} onSignature Callback to be called with string to be signed + * @return {Oid} The oid of the commit + */ +Repository.prototype.createCommitWithSignature = function( + updateRef, + author, + committer, + message, + tree, + parents, + onSignature +) { + + var repo = this; + var promises = []; + var commitContent; + var skippedSigning; + + parents = 
parents || []; + + promises.push(repo.getTree(tree)); + + parents.forEach(function(parent) { + promises.push(repo.getCommit(parent)); + }); + + const createCommitPromise = Promise.all(promises).then(function(results) { + tree = results[0]; + + // Get the normalized values for our input into the function + var parentsLength = parents.length; + parents = []; + + for (var i = 0; i < parentsLength; i++) { + parents.push(results[i + 1]); + } + + return Commit.createBuffer( + repo, + author, + committer, + null /* use default message encoding */, + message, + tree, + parents.length, + parents + ); + }).then(function(commitContentResult) { + commitContent = commitContentResult; + if (!commitContent.endsWith("\n")) { + commitContent += "\n"; + } + return onSignature(commitContent); + }).then(function({ code, field, signedData }) { + switch (code) { + case NodeGit.Error.CODE.OK: + return Commit.createWithSignature( + repo, + commitContent, + signedData, + field + ); + case NodeGit.Error.CODE.PASSTHROUGH: + skippedSigning = true; + return Commit.create( + repo, + updateRef, + author, + committer, + null /* use default message encoding */, + message, + tree, + parents.length, + parents + ); + default: { + const error = new Error( + "Repository.prototype.createCommitWithSignature " + + `threw with error code ${code}` + ); + error.errno = code; + throw error; + } + } + }); + + if (!updateRef) { + return createCommitPromise; + } + + return createCommitPromise + .then(function(commitOid) { + if (skippedSigning) { + return commitOid; + } + + return repo.getCommit(commitOid) + .then(function(commitResult) { + return Reference.updateTerminal( + repo, + updateRef, + commitOid, + getReflogMessageForCommit(commitResult), + committer + ); + }) + .then(function() { + return commitOid; + }); + }); +}; + /** * Creates a new commit on HEAD from the list of passed in files * @@ -636,8 +750,7 @@ Repository.prototype.createCommitOnHead = function( filesToAdd, author, committer, - message, - 
callback) { + message) { var repo = this; @@ -676,7 +789,7 @@ Repository.prototype.createCommitOnHead = function( parent ); }); - }, callback); + }); }; /** @@ -687,7 +800,7 @@ Repository.prototype.createCommitOnHead = function( * @param {String} name the name of the tag * @return {Reference} */ -Repository.prototype.createLightweightTag = function(oid, name, callback) { +Repository.prototype.createLightweightTag = function(oid, name) { var repository = this; return Commit.lookup(repository, oid) @@ -704,8 +817,7 @@ Repository.prototype.createLightweightTag = function(oid, name, callback) { * Instantiate a new revision walker for browsing the Repository"s history. * See also `Commit.prototype.history()` * - * @param {String|Oid} String sha or Oid - * @return {RevWalk} + * @return {Revwalk} */ Repository.prototype.createRevWalk = function() { return Revwalk.create(this); @@ -721,32 +833,41 @@ Repository.prototype.createRevWalk = function() { * annotated tag * @return {Tag} */ -Repository.prototype.createTag = function(oid, name, message, callback) { - var repository = this; - var signature = repository.defaultSignature(); - - return Commit.lookup(repository, oid) - .then(function(commit) { +Repository.prototype.createTag = function(oid, name, message) { + const repository = this; + let signature = null; + + return repository.defaultSignature() + .then((signatureResult) => { + signature = signatureResult; + return Commit.lookup(repository, oid); + }) + .then((commit) => { // Final argument is `force` which overwrites any previous tag return Tag.create(repository, name, commit, signature, message, 0); }) - .then(function(tagOid) { - return repository.getTag(tagOid, callback); + .then((tagOid) => { + return repository.getTag(tagOid); }); }; /** * Gets the default signature for the default user and now timestamp + * + * @async * @return {Signature} */ Repository.prototype.defaultSignature = function() { - var result = NodeGit.Signature.default(this); - - if (!result || 
!result.name()) { - result = NodeGit.Signature.now("unknown", "unknown@example.com"); - } - - return result; + return NodeGit.Signature.default(this) + .then((result) => { + if (!result || !result.name()) { + result = NodeGit.Signature.now("unknown", "unknown@example.com"); + } + return result; + }) + .catch(() => { + return NodeGit.Signature.now("unknown", "unknown@example.com"); + }); }; /** @@ -846,28 +967,16 @@ Repository.prototype.discardLines = */ Repository.prototype.fetch = function( remote, - fetchOptions, - callback) + fetchOptions) { var repo = this; - function finallyFn(error) { - if (typeof callback === "function") { - callback(error); - } - } - return repo.getRemote(remote) .then(function(remote) { return remote.fetch(null, fetchOptions, "Fetch from " + remote) .then(function() { return remote.disconnect(); }); - }) - .then(finallyFn) - .catch(function(error) { - finallyFn(error); - throw error; }); }; @@ -878,12 +987,8 @@ Repository.prototype.fetch = function( * @async * @param {Object|FetchOptions} fetchOptions Options for the fetch, includes * callbacks for fetching - * @param {Function} callback */ -Repository.prototype.fetchAll = function( - fetchOptions, - callback) -{ +Repository.prototype.fetchAll = function(fetchOptions) { var repo = this; function createCallbackWrapper(fn, remote) { @@ -903,7 +1008,7 @@ Repository.prototype.fetchAll = function( var certificateCheck = remoteCallbacks.certificateCheck; var transferProgress = remoteCallbacks.transferProgress; - return repo.getRemotes() + return repo.getRemoteNames() .then(function(remotes) { return remotes.reduce(function(fetchPromise, remote) { var wrappedFetchOptions = shallowClone(fetchOptions); @@ -930,11 +1035,6 @@ Repository.prototype.fetchAll = function( return repo.fetch(remote, wrappedFetchOptions); }); }, Promise.resolve()); - }) - .then(function() { - if (typeof callback === "function") { - callback(); - } }); }; @@ -954,18 +1054,13 @@ Repository.prototype.fetchheadForeach = 
function(callback) { * @param {String|Oid} String sha or Oid * @return {Blob} */ -Repository.prototype.getBlob = function(oid, callback) { +Repository.prototype.getBlob = function(oid) { var repository = this; return Blob.lookup(repository, oid).then(function(blob) { blob.repo = repository; - - if (typeof callback === "function") { - callback(null, blob); - } - return blob; - }, callback); + }); }; /** @@ -976,8 +1071,8 @@ Repository.prototype.getBlob = function(oid, callback) { * or Branch Ref * @return {Reference} */ -Repository.prototype.getBranch = function(name, callback) { - return this.getReference(name, callback); +Repository.prototype.getBranch = function(name) { + return this.getReference(name); }; /** @@ -988,8 +1083,8 @@ Repository.prototype.getBranch = function(name, callback) { * or Branch Ref * @return {Commit} */ -Repository.prototype.getBranchCommit = function(name, callback) { - return this.getReferenceCommit(name, callback); +Repository.prototype.getBranchCommit = function(name) { + return this.getReferenceCommit(name); }; /** @@ -999,18 +1094,10 @@ Repository.prototype.getBranchCommit = function(name, callback) { * @param {String|Oid} String sha or Oid * @return {Commit} */ -Repository.prototype.getCommit = function(oid, callback) { +Repository.prototype.getCommit = function(oid) { var repository = this; - return Commit.lookup(repository, oid).then(function(commit) { - commit.repo = repository; - - if (typeof callback === "function") { - callback(null, commit); - } - - return commit; - }, callback); + return Commit.lookup(repository, oid); }; /** @@ -1030,12 +1117,12 @@ Repository.prototype.getCurrentBranch = function() { * @async * @return {Commit} */ -Repository.prototype.getHeadCommit = function(callback) { +Repository.prototype.getHeadCommit = function() { var repo = this; return Reference.nameToId(repo, "HEAD") .then(function(head) { - return repo.getCommit(head, callback); + return repo.getCommit(head); }) .catch(function() { return null; 
@@ -1048,8 +1135,8 @@ Repository.prototype.getHeadCommit = function(callback) { * @async * @return {Commit} */ -Repository.prototype.getMasterCommit = function(callback) { - return this.getBranchCommit("master", callback); +Repository.prototype.getMasterCommit = function() { + return this.getBranchCommit("master"); }; /** @@ -1060,28 +1147,20 @@ Repository.prototype.getMasterCommit = function(callback) { * or Branch Ref * @return {Reference} */ -Repository.prototype.getReference = function(name, callback) { +Repository.prototype.getReference = function(name) { var repository = this; return Reference.dwim(this, name).then(function(reference) { if (reference.isSymbolic()) { return reference.resolve().then(function(reference) { reference.repo = repository; - - if (typeof callback === "function") { - callback(null, reference); - } - return reference; - }, callback); - } else { - reference.repo = repository; - if (typeof callback === "function") { - callback(null, reference); - } - return reference; + }); } - }, callback); + + reference.repo = repository; + return reference; + }); }; /** @@ -1092,18 +1171,12 @@ Repository.prototype.getReference = function(name, callback) { * or Branch Ref * @return {Commit} */ -Repository.prototype.getReferenceCommit = function(name, callback) { +Repository.prototype.getReferenceCommit = function(name) { var repository = this; return this.getReference(name).then(function(reference) { - return repository.getCommit(reference.target()).then(function(commit) { - if (typeof callback === "function") { - callback(null, commit); - } - - return commit; - }); - }, callback); + return repository.getCommit(reference.target()); + }); }; /** @@ -1113,8 +1186,8 @@ Repository.prototype.getReferenceCommit = function(name, callback) { * @param {Reference.TYPE} type Type of reference to look up * @return {Array} */ -Repository.prototype.getReferenceNames = function(type, callback) { - return Repository.getReferences(this, type, true, callback); 
+Repository.prototype.getReferenceNames = function(type) { + return Repository.getReferences(this, type, true); }; /** @@ -1124,53 +1197,30 @@ Repository.prototype.getReferenceNames = function(type, callback) { * @param {Reference.TYPE} type Type of reference to look up * @return {Array} */ -Repository.prototype.getReferences = function(type, callback) { - return Repository.getReferences(this, type, false, callback); -}; /** * Gets a remote from the repo * * @async * @param {String|Remote} remote - * @param {Function} callback * @return {Remote} The remote object */ -Repository.prototype.getRemote = function(remote, callback) { +Repository.prototype.getRemote = function(remote) { if (remote instanceof NodeGit.Remote) { - return Promise.resolve(remote).then(function(remoteObj) { - if (typeof callback === "function") { - callback(null, remoteObj); - } - - return remoteObj; - }, callback); + return Promise.resolve(remote); } - return NodeGit.Remote.lookup(this, remote).then(function(remoteObj) { - if (typeof callback === "function") { - callback(null, remoteObj); - } - - return remoteObj; - }, callback); + return NodeGit.Remote.lookup(this, remote); }; /** * Lists out the remotes in the given repository. * * @async -* @param {Function} Optional callback * @return {Object} Promise object. 
*/ -Repository.prototype.getRemotes = function(callback) { - return Remote.list(this).then(function(remotes) { - if (typeof callback === "function") { - callback(null, remotes); - } - - return remotes; - }, callback); +Repository.prototype.getRemoteNames = function() { + return Remote.list(this); }; /** @@ -1236,17 +1286,13 @@ Repository.prototype.getStatusExt = function(opts) { * @async * @return {Array} */ -Repository.prototype.getSubmoduleNames = function(callback) { +Repository.prototype.getSubmoduleNames = function() { var names = []; var submoduleCallback = function(submodule, name, payload) { names.push(name); }; return Submodule.foreach(this, submoduleCallback).then(function() { - if (typeof callback === "function") { - callback(null, names); - } - return names; }); }; @@ -1258,18 +1304,13 @@ Repository.prototype.getSubmoduleNames = function(callback) { * @param {String|Oid} String sha or Oid * @return {Tag} */ -Repository.prototype.getTag = function(oid, callback) { +Repository.prototype.getTag = function(oid) { var repository = this; return Tag.lookup(repository, oid).then(function(reference) { reference.repo = repository; - - if (typeof callback === "function") { - callback(null, reference); - } - return reference; - }, callback); + }); }; /** @@ -1279,22 +1320,18 @@ Repository.prototype.getTag = function(oid, callback) { * @param {String} Short or full tag name * @return {Tag} */ -Repository.prototype.getTagByName = function(name, callback) { +Repository.prototype.getTagByName = function(name) { var repo = this; name = ~name.indexOf("refs/tags/") ? 
name : "refs/tags/" + name; - return Reference.nameToId(repo, name).then(function(oid) { - return Tag.lookup(repo, oid).then(function(reference) { + return Reference.nameToId(repo, name) + .then(function(oid) { + return Tag.lookup(repo, oid); + }).then(function(reference) { reference.repo = repo; - - if (typeof callback === "function") { - callback(null, reference); - } - return reference; }); - }, callback); }; /** @@ -1304,18 +1341,13 @@ Repository.prototype.getTagByName = function(name, callback) { * @param {String|Oid} String sha or Oid * @return {Tree} */ -Repository.prototype.getTree = function(oid, callback) { +Repository.prototype.getTree = function(oid) { var repository = this; return Tree.lookup(repository, oid).then(function(tree) { tree.repo = repository; - - if (typeof callback === "function") { - callback(null, tree); - } - return tree; - }, callback); + }); }; /** @@ -1399,6 +1431,8 @@ Repository.prototype.isReverting = function() { * promise, finish() will be called when the * promise resolves. 
This callback will be * provided a detailed overview of the rebase + * @param {RebaseOptions} rebaseOptions Options to initialize the rebase object + * with * @return {Oid|Index} A commit id for a succesful merge or an index for a * rebase with conflicts */ @@ -1412,12 +1446,21 @@ Repository.prototype.rebaseBranches = function( rebaseOptions ) { - var repo = this; - var branchCommit; - var upstreamCommit; - var ontoCommit; - var mergeOptions = (rebaseOptions || {}).mergeOptions; - signature = signature || repo.defaultSignature(); + const repo = this; + let branchCommit; + let upstreamCommit; + let ontoCommit; + let mergeOptions = (rebaseOptions || {}).mergeOptions; + + let promiseChain = Promise.resolve(); + + if (!signature) { + promiseChain = promiseChain + .then(() => repo.defaultSignature()) + .then((signatureResult) => { + signature = signatureResult; + }); + } return Promise.all([ repo.getReference(branch), @@ -1486,19 +1529,12 @@ Repository.prototype.rebaseBranches = function( * @async * @return {Index} */ -Repository.prototype.refreshIndex = function(callback) { +Repository.prototype.refreshIndex = function() { var repo = this; repo.setIndex(); // clear the index - return repo.index() - .then(function(index) { - if (typeof callback === "function") { - callback(null, index); - } - - return index; - }, callback); + return repo.index(); }; /** @@ -1510,6 +1546,7 @@ Repository.prototype.refreshIndex = function(callback) { * @param {Signature} signature * @param {Merge.PREFERENCE} mergePreference * @param {MergeOptions} mergeOptions + * @param {MergeBranchOptions} mergeBranchOptions * @return {Oid|Index} A commit id for a succesful merge or an index for a * merge with conflicts */ @@ -1519,23 +1556,41 @@ Repository.prototype.mergeBranches = function( signature, mergePreference, mergeOptions, - processMergeMessageCallback + mergeBranchOptions ) { - var repo = this; - var fromBranch; - var toBranch; - processMergeMessageCallback = processMergeMessageCallback || + 
const repo = this; + let fromBranch; + let toBranch; + // Support old parameter `processMergeMessageCallback` + const isOldOptionParameter = typeof mergeBranchOptions === "function"; + if (isOldOptionParameter) { + console.error("DeprecationWarning: Repository#mergeBranches parameter " + + "processMergeMessageCallback, use MergeBranchOptions"); + } + const processMergeMessageCallback = mergeBranchOptions && + (isOldOptionParameter ? + mergeBranchOptions : + mergeBranchOptions.processMergeMessageCallback) || function (message) { return message; }; + const signingCallback = mergeBranchOptions && mergeBranchOptions.signingCb; mergePreference = mergePreference || NodeGit.Merge.PREFERENCE.NONE; - mergeOptions = normalizeOptions(mergeOptions, NodeGit.MergeOptions); - signature = signature || repo.defaultSignature(); + let promiseChain = Promise.resolve(); - return Promise.all([ - repo.getBranch(to), - repo.getBranch(from) - ]).then(function(objects) { + if (!signature) { + promiseChain = promiseChain + .then(() => repo.defaultSignature()) + .then((signatureResult) => { + signature = signatureResult; + }); + } + + return promiseChain.then(() => Promise.all([ + repo.getBranch(to), + repo.getBranch(from) + ])) + .then((objects) => { toBranch = objects[0]; fromBranch = objects[1]; @@ -1544,12 +1599,12 @@ Repository.prototype.mergeBranches = function( repo.getBranchCommit(fromBranch) ]); }) - .then(function(branchCommits) { + .then((branchCommits) => { var toCommitOid = branchCommits[0].toString(); var fromCommitOid = branchCommits[1].toString(); return NodeGit.Merge.base(repo, toCommitOid, fromCommitOid) - .then(function(baseCommit) { + .then((baseCommit) => { if (baseCommit.toString() == fromCommitOid) { // The commit we're merging to is already in our history. 
// nothing to do so just return the commit the branch is on @@ -1565,7 +1620,7 @@ Repository.prototype.mergeBranches = function( fromBranch.shorthand(); return branchCommits[1].getTree() - .then(function(tree) { + .then((tree) => { if (toBranch.isHead()) { // Checkout the tree if we're on the branch var opts = { @@ -1575,11 +1630,11 @@ Repository.prototype.mergeBranches = function( return NodeGit.Checkout.tree(repo, tree, opts); } }) - .then(function() { + .then(() => { return toBranch.setTarget( fromCommitOid, message) - .then(function() { + .then(() => { return fromCommitOid; }); }); @@ -1588,10 +1643,10 @@ Repository.prototype.mergeBranches = function( var updateHead; // We have to merge. Lets do it! return NodeGit.Reference.lookup(repo, "HEAD") - .then(function(headRef) { + .then((headRef) => { return headRef.resolve(); }) - .then(function(headRef) { + .then((headRef) => { updateHead = !!headRef && (headRef.name() === toBranch.name()); return NodeGit.Merge.commits( repo, @@ -1600,7 +1655,7 @@ Repository.prototype.mergeBranches = function( mergeOptions ); }) - .then(function(index) { + .then((index) => { // if we have conflicts then throw the index if (index.hasConflicts()) { throw index; @@ -1609,7 +1664,7 @@ Repository.prototype.mergeBranches = function( // No conflicts so just go ahead with the merge return index.writeTreeTo(repo); }) - .then(function(oid) { + .then((oid) => { var mergeDecorator; if (fromBranch.isTag()) { mergeDecorator = "tag"; @@ -1633,7 +1688,18 @@ Repository.prototype.mergeBranches = function( return Promise.all([oid, processMergeMessageCallback(message)]); }) - .then(function([oid, message]) { + .then(([oid, message]) => { + if (signingCallback) { + return repo.createCommitWithSignature( + toBranch.name(), + signature, + signature, + message, + oid, + [toCommitOid, fromCommitOid], + signingCallback + ); + } return repo.createCommit( toBranch.name(), signature, @@ -1643,25 +1709,25 @@ Repository.prototype.mergeBranches = function( 
[toCommitOid, fromCommitOid] ); }) - .then(function(commit) { + .then((commit) => { // we've updated the checked out branch, so make sure we update // head so that our index isn't messed up if (updateHead) { return repo.getBranch(to) - .then(function(branch) { + .then((branch) => { return repo.getBranchCommit(branch); }) - .then(function(branchCommit) { + .then((branchCommit) => { return branchCommit.getTree(); }) - .then(function(tree) { + .then((tree) => { var opts = { checkoutStrategy: NodeGit.Checkout.STRATEGY.SAFE | NodeGit.Checkout.STRATEGY.RECREATE_MISSING }; return NodeGit.Checkout.tree(repo, tree, opts); }) - .then(function() { + .then(() => { return commit; }); } @@ -1867,7 +1933,7 @@ Repository.prototype.stageLines = ); }) .then(function(newContent) { - var newContentBuffer = new Buffer(newContent); + var newContentBuffer = Buffer.from(newContent); return repo.createBlobFromBuffer(newContentBuffer); }) diff --git a/lib/reset.js b/lib/reset.js index 18a7ebfe1c..38b6b1bffc 100644 --- a/lib/reset.js +++ b/lib/reset.js @@ -1,10 +1,8 @@ var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; var Reset = NodeGit.Reset; var _default = Reset.default; var _reset = Reset.reset; -var _fromAnnotated = Reset.fromAnnotated; /** * Look up a refs's commit. 
@@ -44,29 +42,10 @@ Reset.default = function(repo, target, pathspecs) { * @return {Number} 0 on success or an error code */ Reset.reset = function(repo, target, resetType, opts) { - opts = normalizeOptions(opts, NodeGit.CheckoutOptions); - + if (repo !== target.repo) { + // this is the same that is performed on libgit2's side + // https://github.com/nodegit/libgit2/blob/8d89e409616831b7b30a5ca7b89354957137b65e/src/reset.c#L120-L124 + throw new Error("Repository and target commit's repository does not match"); + } return _reset.call(this, repo, target, resetType, opts); }; - -/** - * Sets the current head to the specified commit oid and optionally - * resets the index and working tree to match. - * - * This behaves like reset but takes an annotated commit, which lets - * you specify which extended sha syntax string was specified by a - * user, allowing for more exact reflog messages. - * - * See the documentation for reset. - * - * @async - * @param {Repository} repo - * @param {AnnotatedCommit} target - * @param {Number} resetType - * @param {CheckoutOptions} opts - */ -Reset.fromAnnotated = function(repo, target, resetType, opts) { - opts = normalizeOptions(opts, NodeGit.CheckoutOptions); - - return _fromAnnotated.call(this, repo, target, resetType, opts); -}; diff --git a/lib/revert.js b/lib/revert.js deleted file mode 100644 index 2dddb2cee3..0000000000 --- a/lib/revert.js +++ /dev/null @@ -1,85 +0,0 @@ -var NodeGit = require("../"); -var shallowClone = NodeGit.Utils.shallowClone; -var normalizeOptions = NodeGit.Utils.normalizeOptions; - -var Revert = NodeGit.Revert; -var _commit = Revert.commit; -var _revert = Revert.revert; - -/** - * Reverts the given commit against the given "our" commit, producing an index - * that reflects the result of the revert. - * - * @async - * @param {Repository} repo the repository that contains the given commits. 
- * @param {Commit} revert_commit the commit to revert - * @param {Commit} our_commit the commit to revert against (e.g. HEAD) - * @param {Number} mainline the parent of the revert commit, if it is a merge - * @param {MergeOptions} merge_options the merge options (or null for defaults) - * - * @return {Index} the index result - */ -Revert.commit = function( - repo, - revert_commit, - our_commit, - mainline, - merge_options, - callback -) -{ - merge_options = normalizeOptions(merge_options, NodeGit.MergeOptions); - - return _commit.call( - this, - repo, - revert_commit, - our_commit, - mainline, - merge_options - ) - .then(function(result) { - if (typeof callback === "function") { - callback(null, result); - } - - return result; - }, callback); -}; - -/** - * Reverts the given commit, producing changes in the index and - * working directory. - * - * @async - * @param {Repository} repo the repository to perform the revert in - * @param {Commit} commit the commit to revert - * @param {RevertOptions} revert_options the revert options - * (or null for defaults) - */ -Revert.revert = function(repo, commit, revertOpts) { - var mergeOpts; - var checkoutOpts; - - if (revertOpts) { - revertOpts = shallowClone(revertOpts); - mergeOpts = revertOpts.mergeOpts; - checkoutOpts = revertOpts.checkoutOpts; - delete revertOpts.mergeOpts; - delete revertOpts.checkoutOpts; - } - - revertOpts = normalizeOptions(revertOpts, NodeGit.RevertOptions); - - if (revertOpts) { - revertOpts.mergeOpts = - normalizeOptions(mergeOpts, NodeGit.MergeOptions); - } - - if (checkoutOpts) { - revertOpts.checkoutOpts = - normalizeOptions(checkoutOpts, NodeGit.CheckoutOptions); - } - - return _revert.call(this, repo, commit, revertOpts); -}; diff --git a/lib/revparse.js b/lib/revparse.js new file mode 100644 index 0000000000..74c1fa4c24 --- /dev/null +++ b/lib/revparse.js @@ -0,0 +1,18 @@ +var util = require("util"); +var NodeGit = require("../"); + +const MODE = { + SINGLE: 1, + RANGE: 2, + MERGE_BASE: 4, 
+}; + +NodeGit.Revparse.MODE = {}; +Object.keys(MODE).forEach((key) => { + Object.defineProperty(NodeGit.Revparse.MODE, key, { + get: util.deprecate( + () => MODE[key], + `Use NodeGit.Revspec.TYPE.${key} instead of NodeGit.Revparse.MODE.${key}.` + ) + }); +}); diff --git a/lib/revwalk.js b/lib/revwalk.js index a12d9f7f49..7787fc89b1 100644 --- a/lib/revwalk.js +++ b/lib/revwalk.js @@ -119,7 +119,7 @@ Revwalk.prototype.walk = function(oid, callback) { this.push(oid); function walk() { - revwalk.next().done(function(oid) { + revwalk.next().then(function(oid) { if (!oid) { if (typeof callback === "function") { return callback(); diff --git a/lib/signature.js b/lib/signature.js index 7fc8e274d3..4dfe84a363 100644 --- a/lib/signature.js +++ b/lib/signature.js @@ -1,11 +1,38 @@ var NodeGit = require("../"); var Signature = NodeGit.Signature; +const toPaddedDoubleDigitString = (number) => { + if (number < 10) { + return `0${number}`; + } + + return `${number}`; +}; + /** * Standard string representation of an author. - * - * @return {string} Representation of the author. + * @param {Boolean} withTime Whether or not to include timestamp + * @return {String} Representation of the author. */ -Signature.prototype.toString = function() { - return this.name().toString() + " <" + this.email().toString() + ">"; +Signature.prototype.toString = function(withTime) { + const name = this.name().toString(); + const email = this.email().toString(); + + let stringifiedSignature = `${name} <${email}>`; + + if (!withTime) { + return stringifiedSignature; + } + + const when = this.when(); + const offset = when.offset(); + const offsetMagnitude = Math.abs(offset); + const time = when.time(); + + const sign = (offset < 0 || when.sign() === "-") ? 
"-" : "+"; + const hours = toPaddedDoubleDigitString(Math.floor(offsetMagnitude / 60)); + const minutes = toPaddedDoubleDigitString(offsetMagnitude % 60); + + stringifiedSignature += ` ${time} ${sign}${hours}${minutes}`; + return stringifiedSignature; }; diff --git a/lib/stash.js b/lib/stash.js index 7067b24ce0..88e9f3510d 100644 --- a/lib/stash.js +++ b/lib/stash.js @@ -1,32 +1,7 @@ var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; -var shallowClone = NodeGit.Utils.shallowClone; var Stash = NodeGit.Stash; -var _apply = Stash.apply; var _foreach = Stash.foreach; -var _pop = Stash.pop; - -Stash.apply = function(repo, index, options) { - var checkoutOptions; - - if (options) { - options = shallowClone(options); - checkoutOptions = options.checkoutOptions; - delete options.checkoutOptions; - } else { - options = {}; - } - - options = normalizeOptions(options, NodeGit.StashApplyOptions); - - if (checkoutOptions) { - options.checkoutOptions = - normalizeOptions(checkoutOptions, NodeGit.CheckoutOptions); - } - - return _apply(repo, index, options); -}; // Override Stash.foreach to eliminate the need to pass null payload Stash.foreach = function(repo, callback) { @@ -39,24 +14,3 @@ Stash.foreach = function(repo, callback) { return _foreach(repo, wrappedCallback, null); }; - -Stash.pop = function(repo, index, options) { - var checkoutOptions; - - if (options) { - options = shallowClone(options); - checkoutOptions = options.checkoutOptions; - delete options.checkoutOptions; - } else { - options = {}; - } - - options = normalizeOptions(options, NodeGit.StashApplyOptions); - - if (checkoutOptions) { - options.checkoutOptions = - normalizeOptions(checkoutOptions, NodeGit.CheckoutOptions); - } - - return _pop(repo, index, options); -}; diff --git a/lib/status.js b/lib/status.js index c5c762baae..93aca08e09 100644 --- a/lib/status.js +++ b/lib/status.js @@ -1,5 +1,4 @@ var NodeGit = require("../"); -var normalizeOptions = 
NodeGit.Utils.normalizeOptions; var Status = NodeGit.Status; @@ -11,8 +10,7 @@ Status.foreach = function(repo, callback) { return _foreach(repo, callback, null); }; -// Override Status.foreachExt to normalize opts +// Override Status.foreachExt to eliminate the need to pass null payload Status.foreachExt = function(repo, opts, callback) { - opts = normalizeOptions(opts, NodeGit.StatusOptions); return _foreachExt(repo, opts, callback, null); }; diff --git a/lib/status_list.js b/lib/status_list.js deleted file mode 100644 index efccbad2ef..0000000000 --- a/lib/status_list.js +++ /dev/null @@ -1,12 +0,0 @@ -var NodeGit = require("../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; - -var StatusList = NodeGit.StatusList; - -var _create = StatusList.create; - -// Override StatusList.create to normalize opts -StatusList.create = function(repo, opts) { - opts = normalizeOptions(opts, NodeGit.StatusOptions); - return _create(repo, opts); -}; diff --git a/lib/submodule.js b/lib/submodule.js index 92b0e6ec04..2b0cb530db 100644 --- a/lib/submodule.js +++ b/lib/submodule.js @@ -1,51 +1,10 @@ var NodeGit = require("../"); -var normalizeFetchOptions = NodeGit.Utils.normalizeFetchOptions; -var normalizeOptions = NodeGit.Utils.normalizeOptions; -var shallowClone = NodeGit.Utils.shallowClone; var Submodule = NodeGit.Submodule; var _foreach = Submodule.foreach; -var _update = Submodule.prototype.update; // Override Submodule.foreach to eliminate the need to pass null payload Submodule.foreach = function(repo, callback) { return _foreach(repo, callback, null); }; - -/** - * Updates a submodule - * - * @async - * @param {Number} init Setting this to 1 will initialize submodule - * before updating - * @param {SubmoduleUpdateOptions} options Submodule update settings - * @return {Number} 0 on success, any non-zero return value from a callback - */ -Submodule.prototype.update = function(init, options) { - var fetchOpts; - var checkoutOpts; - - if (options) { - options = 
shallowClone(options); - fetchOpts = options.fetchOpts; - checkoutOpts = options.checkoutOpts; - delete options.fetchOpts; - delete options.checkoutOpts; - } - - options = normalizeOptions(options, NodeGit.SubmoduleUpdateOptions); - - if (fetchOpts) { - options.fetchOpts = normalizeFetchOptions(fetchOpts); - } - - if (checkoutOpts) { - options.checkoutOpts = normalizeOptions( - checkoutOpts, - NodeGit.CheckoutOptions - ); - } - - return _update.call(this, init, options); -}; diff --git a/lib/tag.js b/lib/tag.js index bf8ddff49f..aae20c9302 100644 --- a/lib/tag.js +++ b/lib/tag.js @@ -1,12 +1,141 @@ +var util = require("util"); var NodeGit = require("../"); var LookupWrapper = NodeGit.Utils.lookupWrapper; var Tag = NodeGit.Tag; +const signatureRegexesBySignatureType = { + gpgsig: [ + /-----BEGIN PGP SIGNATURE-----[\s\S]+?-----END PGP SIGNATURE-----/gm, + /-----BEGIN PGP MESSAGE-----[\s\S]+?-----END PGP MESSAGE-----/gm, + ], + x509: [ + /-----BEGIN SIGNED MESSAGE-----[\s\S]+?-----END SIGNED MESSAGE-----/gm, + ] +}; + /** -* Retrieves the tag pointed to by the oid -* @async -* @param {Repository} repo The repo that the tag lives in -* @param {String|Oid|Tag} id The tag to lookup -* @return {Tag} -*/ + * Retrieves the tag pointed to by the oid + * @async + * @param {Repository} repo The repo that the tag lives in + * @param {String|Oid|Tag} id The tag to lookup + * @return {Tag} + */ Tag.lookup = LookupWrapper(Tag); + +/** + * @async + * @param {Repository} repo + * @param {String} tagName + * @param {Oid} target + * @param {Signature} tagger + * @return {String} + */ +Tag.createBuffer = function(repo, tagName, target, tagger, message) { + return NodeGit.Object.lookup(repo, target, NodeGit.Object.TYPE.ANY) + .then((object) => { + if (!NodeGit.Object.typeisloose(object.type())) { + throw new Error("Object must be a loose type"); + } + + const id = object.id().toString(); + const objectType = NodeGit.Object.type2String(object.type()); + const lines = [ + `object ${id}`, 
+ `type ${objectType}`, + `tag ${tagName}`, + `tagger ${tagger.toString(true)}\n`, + `${message}${message.endsWith("\n") ? "" : "\n"}` + ]; + return lines.join("\n"); + }); +}; + +const deprecatedCreateWithSignatureHelper = util.deprecate(function(repo, oidTarget) { + return repo.getCommit(oidTarget); +}, "Tag.createWithSignature target should be a Git Object, not Oid"); + +/** + * @async + * @param {Repository} repo + * @param {String} tagName + * @param {Object} target + * @param {Signature} tagger + * @param {String} message + * @param {Number} force + * @param {Function} signingCallback Takes a string and returns a string + * representing the signed message + * @return {Oid} + */ +Tag.createWithSignature = async ( + repo, + tagName, + target, + tagger, + message, + force, + signingCallback +) => { + let targetOid; + if (!target.id) { + targetOid = await deprecatedCreateWithSignatureHelper(repo, target); + } else { + targetOid = target; + } + + const tagBuffer = await Tag.createBuffer(repo, tagName, targetOid.id(), tagger, message); + const { code, signedData } = await signingCallback(tagBuffer); + switch (code) { + case NodeGit.Error.CODE.OK: { + const normalizedEnding = signedData.endsWith("\n") ? 
"" : "\n"; + const signedTagString = tagBuffer + signedData + normalizedEnding; + return Tag.createFromBuffer(repo, signedTagString, force); + } + case NodeGit.Error.CODE.PASSTHROUGH: + return Tag.create( + repo, + tagName, + targetOid, + tagger, + message, + force + ); + default: { + const error = new Error( + `Tag.createWithSignature threw with error code ${code}` + ); + error.errno = code; + throw error; + } + } +}; + +/** + * Retrieves the signature of an annotated tag + * @async + * @param {String} signatureType + * @return {String|null} + */ +Tag.prototype.extractSignature = function(signatureType = "gpgsig") { + const id = this.id(); + const repo = this.repo; + const signatureRegexes = signatureRegexesBySignatureType[signatureType]; + if (!signatureRegexes) { + throw new Error("Unsupported signature type"); + } + + return repo.odb().then((odb) => { + return odb.read(id); + }).then((odbObject) => { + const odbData = odbObject.toString(); + + for (const regex of signatureRegexes) { + const matchResult = odbData.match(regex); + + if (matchResult !== null) { + return matchResult[0]; + } + } + + throw new Error("this tag is not signed"); + }); +}; diff --git a/lib/tree.js b/lib/tree.js index 1be6870751..1067fa3f2d 100644 --- a/lib/tree.js +++ b/lib/tree.js @@ -11,7 +11,6 @@ var Treebuilder = NodeGit.Treebuilder; * @async * @param {Repository} repo The repo that the tree lives in * @param {String|Oid|Tree} id The tree to lookup -* @param {Function} callback * @return {Tree} */ Tree.lookup = LookupWrapper(Tree); @@ -33,11 +32,10 @@ Tree.prototype.builder = function() { * Diff two trees * @async * @param {Tree} tree to diff against - * @param {Function} callback - * @return {DiffList} + * @return {Diff} */ -Tree.prototype.diff = function(tree, callback) { - return this.diffWithOptions(tree, null, callback); +Tree.prototype.diff = function(tree) { + return this.diffWithOptions(tree, null); }; /** @@ -45,17 +43,10 @@ Tree.prototype.diff = function(tree, callback) { * 
@async * @param {Tree} tree to diff against * @param {Object} options - * @param {Function} callback - * @return {DiffList} + * @return {Diff} */ -Tree.prototype.diffWithOptions = function(tree, options, callback) { - return Diff.treeToTree(this.repo, tree, this, options).then(function(diff) { - if (typeof callback === "function") { - callback(null, diff); - } - - return diff; - }, callback); +Tree.prototype.diffWithOptions = function(tree, options) { + return Diff.treeToTree(this.repo, tree, this, options); }; /** @@ -104,17 +95,12 @@ Tree.prototype.entryByName = function(name) { * @param {String} filePath * @return {TreeEntry} */ -Tree.prototype.getEntry = function(filePath, callback) { +Tree.prototype.getEntry = function(filePath) { var tree = this; return this.entryByPath(filePath).then(function(entry) { entry.parent = tree; entry.dirtoparent = path.dirname(filePath); - - if (typeof callback === "function") { - callback(null, entry); - } - return entry; }); }; @@ -171,7 +157,8 @@ Tree.prototype.walk = function(blobsOnly) { if (entry.isTree()) { total++; - entry.getTree(bfs); + entry.getTree() + .then(result => bfs(null, result), bfs); } }); diff --git a/lib/tree_entry.js b/lib/tree_entry.js index b9bc0fd72e..a978de9a3c 100644 --- a/lib/tree_entry.js +++ b/lib/tree_entry.js @@ -7,14 +7,8 @@ var TreeEntry = NodeGit.TreeEntry; * @async * @return {Blob} */ -TreeEntry.prototype.getBlob = function(callback) { - return this.parent.repo.getBlob(this.id()).then(function(blob) { - if (typeof callback === "function") { - callback(null, blob); - } - - return blob; - }, callback); +TreeEntry.prototype.getBlob = function() { + return this.parent.repo.getBlob(this.id()); }; /** @@ -22,18 +16,13 @@ TreeEntry.prototype.getBlob = function(callback) { * @async * @return {Tree} */ -TreeEntry.prototype.getTree = function(callback) { +TreeEntry.prototype.getTree = function() { var entry = this; return this.parent.repo.getTree(this.id()).then(function(tree) { tree.entry = entry; - - 
if (typeof callback === "function") { - callback(null, tree); - } - return tree; - }, callback); + }); }; /** @@ -89,7 +78,7 @@ TreeEntry.prototype.oid = function() { * Returns the path for this entry. * @return {String} */ -TreeEntry.prototype.path = function(callback) { +TreeEntry.prototype.path = function() { var dirtoparent = this.dirtoparent || ""; return path.join(this.parent.path(), dirtoparent, this.name()); }; diff --git a/lib/utils/normalize_fetch_options.js b/lib/utils/normalize_fetch_options.js deleted file mode 100644 index 2b43707056..0000000000 --- a/lib/utils/normalize_fetch_options.js +++ /dev/null @@ -1,43 +0,0 @@ -var NodeGit = require("../../"); -var normalizeOptions = NodeGit.Utils.normalizeOptions; -var shallowClone = NodeGit.Utils.shallowClone; - -/** - * Normalize an object to match a struct. - * - * @param {String, Object} oid - The oid string or instance. - * @return {Object} An Oid instance. - */ -function normalizeFetchOptions(options) { - if (options instanceof NodeGit.FetchOptions) { - return options; - } - - var callbacks; - var proxyOpts; - - if (options) { - options = shallowClone(options); - callbacks = options.callbacks; - proxyOpts = options.proxyOpts; - delete options.callbacks; - delete options.proxyOpts; - } else { - options = {}; - } - - options = normalizeOptions(options, NodeGit.FetchOptions); - - if (callbacks) { - options.callbacks = - normalizeOptions(callbacks, NodeGit.RemoteCallbacks); - } - - if (proxyOpts) { - options.proxyOpts = - normalizeOptions(proxyOpts, NodeGit.ProxyOptions); - } - return options; -} - -NodeGit.Utils.normalizeFetchOptions = normalizeFetchOptions; diff --git a/lib/utils/normalize_options.js b/lib/utils/normalize_options.js deleted file mode 100644 index a1a46255c0..0000000000 --- a/lib/utils/normalize_options.js +++ /dev/null @@ -1,29 +0,0 @@ -var NodeGit = require("../../"); - -/** - * Normalize an object to match a struct. - * - * @param {String, Object} oid - The oid string or instance. 
- * @return {Object} An Oid instance. - */ -function normalizeOptions(options, Ctor) { - if (!options) { - return null; - } - - if (options instanceof Ctor) { - return options; - } - - var instance = new Ctor(); - - Object.keys(options).forEach(function(key) { - if (typeof options[key] !== "undefined") { - instance[key] = options[key]; - } - }); - - return instance; -} - -NodeGit.Utils.normalizeOptions = normalizeOptions; diff --git a/lifecycleScripts/install.js b/lifecycleScripts/install.js old mode 100644 new mode 100755 index 16b0471201..96e47afbe5 --- a/lifecycleScripts/install.js +++ b/lifecycleScripts/install.js @@ -1,5 +1,8 @@ var buildFlags = require("../utils/buildFlags"); var spawn = require("child_process").spawn; +var path = require("path"); + +const nodePreGypModulePath = require.resolve("@mapbox/node-pre-gyp"); module.exports = function install() { console.log("[nodegit] Running install script"); @@ -28,7 +31,17 @@ module.exports = function install() { } return new Promise(function(resolve, reject) { - var spawnedNodePreGyp = spawn(nodePreGyp, args); + const gypPath = path.join(__dirname, "..", "node_modules", "node-gyp", "bin", "node-gyp.js"); + + const nodePreGypPath = path.resolve(path.dirname(nodePreGypModulePath), path.join("..", "bin", nodePreGyp)); + console.log("node-pre-gyp path", nodePreGypPath); + var spawnedNodePreGyp = spawn(nodePreGypPath, args, { + env: { + ...process.env, + npm_config_node_gyp: gypPath + }, + shell: process.platform === "win32" + }); spawnedNodePreGyp.stdout.on("data", function(data) { console.info(data.toString().trim()); diff --git a/lifecycleScripts/postinstall.js b/lifecycleScripts/postinstall.js index 7e44aef28f..f8260e3b73 100755 --- a/lifecycleScripts/postinstall.js +++ b/lifecycleScripts/postinstall.js @@ -30,7 +30,7 @@ module.exports = function install() { return Promise.resolve(); } - return exec("node \"" + path.join(rootPath, "dist/nodegit.js\"")) + return exec("node \"" + path.join(rootPath, 
"lib/nodegit.js\"")) .catch(function(e) { if (~e.toString().indexOf("Module version mismatch")) { console.warn( @@ -46,7 +46,7 @@ module.exports = function install() { } }) .then(function() { - // Is we're using NodeGit from a package manager then let's clean up after + // If we're using NodeGit from a package manager then let's clean up after // ourselves when we install successfully. if (!buildFlags.mustBuild) { // We can't remove the source files yet because apparently the diff --git a/lifecycleScripts/preinstall.js b/lifecycleScripts/preinstall.js old mode 100644 new mode 100755 index 6d481658bf..870cf15580 --- a/lifecycleScripts/preinstall.js +++ b/lifecycleScripts/preinstall.js @@ -8,14 +8,22 @@ module.exports = function prepareForBuild() { console.log("[nodegit] Running pre-install script"); return exec("npm -v") - .then(function(npmVersion) { - if (npmVersion.split(".")[0] < 3) { - console.log("[nodegit] npm@2 installed, pre-loading required packages"); - return exec("npm install --ignore-scripts"); - } + .then( + function(npmVersion) { + if (npmVersion.split(".")[0] < 3) { + console.log( + "[nodegit] npm@2 installed, pre-loading required packages" + ); + return exec("npm install --ignore-scripts"); + } - return Promise.resolve(); - }) + return Promise.resolve(); + }, + function() { + // We're installing via yarn, so don't + // care about compability with npm@2 + } + ) .then(function() { if (buildFlags.isGitRepo) { var submodules = require(local("submodules")); diff --git a/package-lock.json b/package-lock.json index f98c11bc7a..93f2f440d8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,2051 +1,6825 @@ { "name": "nodegit", - "version": "0.24.0-alpha.1", - "lockfileVersion": 1, + "version": "0.28.0-alpha.36", + "lockfileVersion": 2, "requires": true, - "dependencies": { - "@types/node": { - "version": "10.11.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.11.0.tgz", - "integrity": 
"sha512-R4Dvw6KjSYn/SpvjRchBOwXr14vVVcFXCtnM3f0aLvlJS8a599rrcEoihcP2/+Z/f75E5GNPd4aWM7j1yei9og==", - "dev": true - }, - "abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" - }, - "ajv": { - "version": "5.5.2", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", - "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=", - "requires": { - "co": "^4.6.0", - "fast-deep-equal": "^1.0.0", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.3.0" + "packages": { + "": { + "name": "nodegit", + "version": "0.28.0-alpha.36", + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@mapbox/node-pre-gyp": "^2.0.0", + "fs-extra": "^7.0.0", + "got": "^14.4.7", + "json5": "^2.1.0", + "lodash": "^4.17.14", + "nan": "^2.23.1", + "node-gyp": "^11.2.0", + "tar-fs": "^3.0.9" + }, + "devDependencies": { + "aws-sdk": "^2.1095.0", + "clean-for-publish": "~1.0.2", + "combyne": "~0.8.1", + "js-beautify": "~1.5.10", + "jshint": "^2.10.0", + "lcov-result-merger": "^3.1.0", + "mocha": "^11.4.0", + "nyc": "^17.1.0", + "walk": "^2.3.9" + }, + "engines": { + "node": ">= 20" } }, - "amdefine": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", - "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", - "dev": true, - "optional": true - }, - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" - }, - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "anymatch": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", - "integrity": 
"sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", "dev": true, - "optional": true, - "requires": { - "micromatch": "^2.1.5", - "normalize-path": "^2.0.0" + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" } }, - "append-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/append-buffer/-/append-buffer-1.0.2.tgz", - "integrity": "sha1-2CIM9GYIFSXv6lBhTz3mUU36WPE=", + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", "dev": true, - "requires": { - "buffer-equal": "^1.0.0" + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" - }, - "are-we-there-yet": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz", - "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==", - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" + "node_modules/@babel/compat-data": { + "version": "7.26.5", + "resolved": 
"https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.5.tgz", + "integrity": "sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" } }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "node_modules/@babel/core": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.7.tgz", + "integrity": "sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA==", "dev": true, - "requires": { - "sprintf-js": "~1.0.2" + "license": "MIT", + "peer": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/helper-compilation-targets": "^7.26.5", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.7", + "@babel/parser": "^7.26.7", + "@babel/template": "^7.25.9", + "@babel/traverse": "^7.26.7", + "@babel/types": "^7.26.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" } }, - "arr-diff": { + "node_modules/@babel/core/node_modules/convert-source-map": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", - "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true, - "optional": true, - "requires": { - "arr-flatten": "^1.0.1" - } - }, - "arr-flatten": 
{ - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "dev": true - }, - "arr-union": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", - "dev": true + "license": "MIT" }, - "array-unique": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", - "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "optional": true - }, - "asap": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" - }, - "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "requires": { - "safer-buffer": "~2.1.0" + "license": "ISC", + "bin": { + "semver": "bin/semver.js" } }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" - }, - "assign-symbols": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", - "dev": true - }, - "async": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz", - "integrity": "sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=", - "dev": true - }, - "async-each": { - "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/async-each/-/async-each-1.0.1.tgz", - "integrity": "sha1-GdOGodntxufByF04iu28xW0zYC0=", + "node_modules/@babel/generator": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.5.tgz", + "integrity": "sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw==", "dev": true, - "optional": true - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "dev": true + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.26.5", + "@babel/types": "^7.26.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } }, - "aws-sdk": { - "version": "2.326.0", - "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.326.0.tgz", - "integrity": "sha512-R8CzUxH7TKsmQTT59CoXGQSXYscmc5TiU/OXb/R0xq1WYEngiznBy+J+cahJOjugSjN+5VQWTGbEzBC3Bc75kQ==", + "node_modules/@babel/helper-compilation-targets": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz", + "integrity": "sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==", "dev": true, - "requires": { - "buffer": "4.9.1", - "events": "1.1.1", - "ieee754": "1.1.8", - "jmespath": "0.15.0", - "querystring": "0.2.0", - "sax": "1.2.1", - "url": "0.10.3", - "uuid": "3.1.0", - "xml2js": "0.4.19" - }, + "license": "MIT", "dependencies": { - "uuid": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.1.0.tgz", - "integrity": 
"sha512-DIWtzUkw04M4k3bf1IcpS2tngXEL26YUD2M0tMDUpnUrz2hgzUBlD55a4FjdLGPvfHxS6uluGWvaVEqgBcVa+g==", - "dev": true - } + "@babel/compat-data": "^7.26.5", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" - }, - "aws4": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz", - "integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==" - }, - "babel-cli": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-cli/-/babel-cli-6.26.0.tgz", - "integrity": "sha1-UCq1SHTX24itALiHoGODzgPQAvE=", + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, - "requires": { - "babel-core": "^6.26.0", - "babel-polyfill": "^6.26.0", - "babel-register": "^6.26.0", - "babel-runtime": "^6.26.0", - "chokidar": "^1.6.1", - "commander": "^2.11.0", - "convert-source-map": "^1.5.0", - "fs-readdir-recursive": "^1.0.0", - "glob": "^7.1.2", - "lodash": "^4.17.4", - "output-file-sync": "^1.1.2", - "path-is-absolute": "^1.0.1", - "slash": "^1.0.0", - "source-map": "^0.5.6", - "v8flags": "^2.1.1" - } - }, - "babel-code-frame": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", - "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", - "dev": true, - "requires": { - "chalk": "^1.1.3", - "esutils": "^2.0.2", - "js-tokens": "^3.0.2" - } - }, - "babel-core": { - "version": "6.26.3", - "resolved": 
"https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", - "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", - "dev": true, - "requires": { - "babel-code-frame": "^6.26.0", - "babel-generator": "^6.26.0", - "babel-helpers": "^6.24.1", - "babel-messages": "^6.23.0", - "babel-register": "^6.26.0", - "babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "convert-source-map": "^1.5.1", - "debug": "^2.6.9", - "json5": "^0.5.1", - "lodash": "^4.17.4", - "minimatch": "^3.0.4", - "path-is-absolute": "^1.0.1", - "private": "^0.1.8", - "slash": "^1.0.0", - "source-map": "^0.5.7" + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" } }, - "babel-generator": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", - "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "requires": { - "babel-messages": "^6.23.0", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "detect-indent": "^4.0.0", - "jsesc": "^1.3.0", - "lodash": "^4.17.4", - "source-map": "^0.5.7", - "trim-right": "^1.0.1" + "license": "ISC", + "bin": { + "semver": "bin/semver.js" } }, - "babel-helper-call-delegate": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", - "integrity": "sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340=", + "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true, - "requires": { - "babel-helper-hoist-variables": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" - } + "license": "ISC" }, - "babel-helper-define-map": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", - "integrity": "sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8=", + "node_modules/@babel/helper-module-imports": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", "dev": true, - "requires": { - "babel-helper-function-name": "^6.24.1", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": "^4.17.4" + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" } }, - "babel-helper-function-name": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", - "integrity": "sha1-00dbjAPtmCQqJbSDUasYOZ01gKk=", + "node_modules/@babel/helper-module-transforms": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", "dev": true, - "requires": { - "babel-helper-get-function-arity": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.25.9", + 
"@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "babel-helper-get-function-arity": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", - "integrity": "sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0=", + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=6.9.0" } }, - "babel-helper-hoist-variables": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", - "integrity": "sha1-HssnaJydJVE+rbyZFKc/VAi+enY=", + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=6.9.0" } }, - "babel-helper-optimise-call-expression": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", - "integrity": "sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc=", + "node_modules/@babel/helper-validator-option": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": 
"sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=6.9.0" } }, - "babel-helper-regex": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", - "integrity": "sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI=", + "node_modules/@babel/helpers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.1.tgz", + "integrity": "sha512-FCvFTm0sWV8Fxhpp2McP5/W53GPllQ9QeQ7SiqGWjMf/LVG07lFa5+pgK05IRhVwtvafT22KF+ZSnM9I545CvQ==", "dev": true, - "requires": { - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": "^4.17.4" + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "babel-helper-replace-supers": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", - "integrity": "sha1-v22/5Dk40XNpohPKiov3S2qQqxo=", + "node_modules/@babel/parser": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.2.tgz", + "integrity": "sha512-QYLs8299NA7WM/bZAdp+CviYYkVoYXlDW2rzliy3chxd1PQjej7JORuMJDJXJUb9g0TT+B99EwaVLKmX+sPXWw==", "dev": true, - "requires": { - "babel-helper-optimise-call-expression": "^6.24.1", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.1" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" } }, - "babel-helpers": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", - "integrity": "sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI=", + 
"node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "babel-messages": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", - "integrity": "sha1-8830cDhYA1sqKVHG7F7fbGLyYw4=", + "node_modules/@babel/traverse": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.7.tgz", + "integrity": "sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/parser": "^7.26.7", + "@babel/template": "^7.25.9", + "@babel/types": "^7.26.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" } }, - "babel-plugin-check-es2015-constants": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", - "integrity": "sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o=", + "node_modules/@babel/types": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.1.tgz", + "integrity": "sha512-+EzkxvLNfiUeKMgy/3luqfsCWFRXLb7U6wNQTk60tovuckwB15B191tJWvpp4HjiQWdJkCxO3Wbvc6jlk3Xb2Q==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + 
"engines": { + "node": ">=6.9.0" } }, - "babel-plugin-transform-es2015-arrow-functions": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", - "integrity": "sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" } }, - "babel-plugin-transform-es2015-block-scoped-functions": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", - "integrity": "sha1-u8UbSflk1wy42OC5ToICRs46YUE=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, - "babel-plugin-transform-es2015-block-scoping": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", - "integrity": "sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8=", - "dev": true, - "requires": { - 
"babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": "^4.17.4" + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "babel-plugin-transform-es2015-classes": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", - "integrity": "sha1-WkxYpQyclGHlZLSyo7+ryXolhNs=", - "dev": true, - "requires": { - "babel-helper-define-map": "^6.24.1", - "babel-helper-function-name": "^6.24.1", - "babel-helper-optimise-call-expression": "^6.24.1", - "babel-helper-replace-supers": "^6.24.1", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - 
"babel-plugin-transform-es2015-computed-properties": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", - "integrity": "sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, - "babel-plugin-transform-es2015-destructuring": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", - "integrity": "sha1-mXux8auWf2gtKwh2/jWNYOdlxW0=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "babel-plugin-transform-es2015-duplicate-keys": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", - "integrity": "sha1-c+s9MQypaePvnskcU3QabxV2Qj4=", - "dev": true, - "requires": { - "babel-runtime": 
"^6.22.0", - "babel-types": "^6.24.1" + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" } }, - "babel-plugin-transform-es2015-for-of": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", - "integrity": "sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@isaacs/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" } }, - "babel-plugin-transform-es2015-function-name": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", - "integrity": "sha1-g0yJhTvDaxrw86TF26qU/Y6sqos=", + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, - "requires": { - "babel-helper-function-name": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "license": "ISC", + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" } }, - 
"babel-plugin-transform-es2015-literals": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", - "integrity": "sha1-T1SgLWzWbPkVKAAZox0xklN3yi4=", + "node_modules/@istanbuljs/load-nyc-config/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "license": "MIT", + "engines": { + "node": ">=6" } }, - "babel-plugin-transform-es2015-modules-amd": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", - "integrity": "sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ=", + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, - "requires": { - "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=8" } }, - "babel-plugin-transform-es2015-modules-commonjs": { - "version": "6.26.2", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", - "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", 
"dev": true, - "requires": { - "babel-plugin-transform-strict-mode": "^6.24.1", - "babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-types": "^6.26.0" + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" } }, - "babel-plugin-transform-es2015-modules-systemjs": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", - "integrity": "sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM=", + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "dev": true, - "requires": { - "babel-helper-hoist-variables": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=6.0.0" } }, - "babel-plugin-transform-es2015-modules-umd": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", - "integrity": "sha1-rJl+YoXNGO1hdq22B9YCNErThGg=", + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "dev": true, - "requires": { - "babel-plugin-transform-es2015-modules-amd": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "license": "MIT", + "engines": { + "node": ">=6.0.0" } }, - "babel-plugin-transform-es2015-object-super": { - "version": "6.24.1", - "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", - "integrity": "sha1-JM72muIcuDp/hgPa0CH1cusnj40=", + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", "dev": true, - "requires": { - "babel-helper-replace-supers": "^6.24.1", - "babel-runtime": "^6.22.0" - } + "license": "MIT" }, - "babel-plugin-transform-es2015-parameters": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", - "integrity": "sha1-V6w1GrScrxSpfNE7CfZv3wpiXys=", + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dev": true, - "requires": { - "babel-helper-call-delegate": "^6.24.1", - "babel-helper-get-function-arity": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "babel-plugin-transform-es2015-shorthand-properties": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", - "integrity": "sha1-JPh11nIch2YbvZmkYi5R8U3jiqA=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "node_modules/@mapbox/node-pre-gyp": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-2.0.0.tgz", + "integrity": "sha512-llMXd39jtP0HpQLVI37Bf1m2ADlEb35GYSh1SDSLsBhR+5iCxiNGlT31yqbNtVHygHAtMy6dWFERpU2JgufhPg==", + "license": "BSD-3-Clause", + "dependencies": { + "consola": "^3.2.3", + "detect-libc": "^2.0.0", + "https-proxy-agent": "^7.0.5", + "node-fetch": "^2.6.7", + "nopt": "^8.0.0", + "semver": "^7.5.3", + "tar": "^7.4.0" + }, + "bin": { + "node-pre-gyp": "bin/node-pre-gyp" + }, + "engines": { + "node": ">=18" } }, - "babel-plugin-transform-es2015-spread": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", - "integrity": "sha1-1taKmfia7cRTbIGlQujdnxdG+NE=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@npmcli/agent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz", + "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==", + "license": "ISC", + "dependencies": { + "agent-base": "^7.1.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.1", + "lru-cache": "^10.0.1", + "socks-proxy-agent": "^8.0.3" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" } }, - "babel-plugin-transform-es2015-sticky-regex": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", - "integrity": "sha1-AMHNsaynERLN8M9hJsLta0V8zbw=", - "dev": true, - "requires": { - "babel-helper-regex": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "node_modules/@npmcli/fs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz", + "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==", + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, 
+ "engines": { + "node": "^18.17.0 || >=20.5.0" } }, - "babel-plugin-transform-es2015-template-literals": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", - "integrity": "sha1-qEs0UPfp+PH2g51taH2oS7EjbY0=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@sec-ant/readable-stream": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", + "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==", + "license": "MIT" + }, + "node_modules/@sindresorhus/is": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-7.0.1.tgz", + "integrity": "sha512-QWLl2P+rsCJeofkDNIT3WFmb6NrRud1SUYW8dIhXK/46XFV8Q/g7Bsvib0Askb0reRLe+WYPeeE+l5cH7SlkuQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" } }, - "babel-plugin-transform-es2015-typeof-symbol": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", - "integrity": "sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0" + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": 
"sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "license": "MIT", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" } }, - "babel-plugin-transform-es2015-unicode-regex": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", - "integrity": "sha1-04sS9C6nMj9yk4fxinxa4frrNek=", - "dev": true, - "requires": { - "babel-helper-regex": "^6.24.1", - "babel-runtime": "^6.22.0", - "regexpu-core": "^2.0.0" + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", + "license": "MIT" + }, + "node_modules/abbrev": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz", + "integrity": "sha1-kbR5JYinc4wl813W9jdSovh3YTU=", + "dev": true + }, + "node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" } }, - "babel-plugin-transform-regenerator": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", - "integrity": "sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8=", + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", "dev": true, - "requires": { - "regenerator-transform": "^0.10.0" + 
"dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" } }, - "babel-plugin-transform-strict-mode": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", - "integrity": "sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g=", - "dev": true, - "requires": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" } }, - "babel-polyfill": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-polyfill/-/babel-polyfill-6.26.0.tgz", - "integrity": "sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM=", - "dev": true, - "requires": { - "babel-runtime": "^6.26.0", - "core-js": "^2.5.0", - "regenerator-runtime": "^0.10.5" - }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dependencies": { - "regenerator-runtime": { - "version": "0.10.5", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz", - "integrity": "sha1-M2w+/BIgrc7dosn6tntaeVWjNlg=", - "dev": true - } + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "babel-preset-es2015": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz", - "integrity": "sha1-1EBQ1rwsn+6nAqrzjXJ6AhBTiTk=", + "node_modules/append-buffer": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/append-buffer/-/append-buffer-1.0.2.tgz", + "integrity": "sha1-2CIM9GYIFSXv6lBhTz3mUU36WPE=", "dev": true, - "requires": { - "babel-plugin-check-es2015-constants": "^6.22.0", - "babel-plugin-transform-es2015-arrow-functions": "^6.22.0", - "babel-plugin-transform-es2015-block-scoped-functions": "^6.22.0", - "babel-plugin-transform-es2015-block-scoping": "^6.24.1", - "babel-plugin-transform-es2015-classes": "^6.24.1", - "babel-plugin-transform-es2015-computed-properties": "^6.24.1", - "babel-plugin-transform-es2015-destructuring": "^6.22.0", - "babel-plugin-transform-es2015-duplicate-keys": "^6.24.1", - "babel-plugin-transform-es2015-for-of": "^6.22.0", - "babel-plugin-transform-es2015-function-name": "^6.24.1", - "babel-plugin-transform-es2015-literals": "^6.22.0", - "babel-plugin-transform-es2015-modules-amd": "^6.24.1", - "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", - "babel-plugin-transform-es2015-modules-systemjs": "^6.24.1", - "babel-plugin-transform-es2015-modules-umd": "^6.24.1", - "babel-plugin-transform-es2015-object-super": "^6.24.1", - "babel-plugin-transform-es2015-parameters": "^6.24.1", - "babel-plugin-transform-es2015-shorthand-properties": "^6.24.1", - "babel-plugin-transform-es2015-spread": "^6.22.0", - "babel-plugin-transform-es2015-sticky-regex": "^6.24.1", - "babel-plugin-transform-es2015-template-literals": "^6.22.0", - "babel-plugin-transform-es2015-typeof-symbol": "^6.22.0", - "babel-plugin-transform-es2015-unicode-regex": "^6.24.1", - "babel-plugin-transform-regenerator": "^6.24.1" + "dependencies": { + "buffer-equal": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" } }, - "babel-register": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", - "integrity": "sha1-btAhFz4vy0htestFxgCahW9kcHE=", + "node_modules/append-transform": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz", 
+ "integrity": "sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg==", "dev": true, - "requires": { - "babel-core": "^6.26.0", - "babel-runtime": "^6.26.0", - "core-js": "^2.5.0", - "home-or-tmp": "^2.0.0", - "lodash": "^4.17.4", - "mkdirp": "^0.5.1", - "source-map-support": "^0.4.15" + "license": "MIT", + "dependencies": { + "default-require-extensions": "^3.0.0" + }, + "engines": { + "node": ">=8" } }, - "babel-runtime": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", - "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=", + "node_modules/archy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", + "integrity": "sha512-Xg+9RwCg/0p32teKdGMPTPnVXKD0w3DfHnFTficozsAgsvq2XenPJq/MYpzzQ/v8zrOyJn6Ds39VA4JIDwFfqw==", "dev": true, - "requires": { - "core-js": "^2.4.0", - "regenerator-runtime": "^0.11.0" - } + "license": "MIT" }, - "babel-template": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", - "integrity": "sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI=", + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, - "requires": { - "babel-runtime": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "lodash": "^4.17.4" + "dependencies": { + "sprintf-js": "~1.0.2" } }, - "babel-traverse": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", - "integrity": "sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4=", + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": 
"sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", "dev": true, - "requires": { - "babel-code-frame": "^6.26.0", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "debug": "^2.6.8", - "globals": "^9.18.0", - "invariant": "^2.2.2", - "lodash": "^4.17.4" + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "babel-types": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", - "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=", + "node_modules/aws-sdk": { + "version": "2.1692.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1692.0.tgz", + "integrity": "sha512-x511uiJ/57FIsbgUe5csJ13k3uzu25uWQE+XqfBis/sB0SFoiElJWXRkgEAUh0U6n40eT3ay5Ue4oPkRMu1LYw==", "dev": true, - "requires": { - "babel-runtime": "^6.26.0", - "esutils": "^2.0.2", - "lodash": "^4.17.4", - "to-fast-properties": "^1.0.3" - } - }, - "babylon": { - "version": "6.18.0", - "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", - "dev": true - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" - }, - "base": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "dev": true, - "requires": { - "cache-base": "^1.0.1", - "class-utils": "^0.3.5", - "component-emitter": "^1.2.1", - "define-property": "^1.0.0", - "isobject": "^3.0.1", - "mixin-deep": "^1.2.0", - "pascalcase": "^0.1.1" - }, + 
"hasInstallScript": true, + "license": "Apache-2.0", "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, - "requires": { - "is-descriptor": "^1.0.0" - } - }, - "is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, - "requires": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - } - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true - } + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "util": "^0.12.4", + "uuid": "8.0.0", + "xml2js": "0.6.2" + }, + 
"engines": { + "node": ">= 10.0.0" } }, - "base64-js": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.0.tgz", - "integrity": "sha512-ccav/yGvoa80BQDljCxsmmQ3Xvx60/UpBIij5QN21W3wBi/hhIC9OoO+KLpu9IJTS9j4DRVJ3aDDF9cMSoa2lw==", - "dev": true + "node_modules/b4a": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.7.tgz", + "integrity": "sha512-OnAYlL5b7LEkALw87fUVafQw5rVR9RjwGd4KUwNQ6DrrNmaVaUCgLipfVlzrPQ4tWOR9P0IXGNOx50jYCCdSJg==", + "license": "Apache-2.0" }, - "bcrypt-pbkdf": { + "node_modules/balanced-match": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/bare-events": { + "version": "2.5.4", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.5.4.tgz", + "integrity": "sha512-+gFfDkR8pj4/TrWCGUGWmJIkBwuxPS5F+a5yWjOHQt2hHvNZd5YLzadjmDUtFmMM4y429bnKLa8bYBMHcYdnQA==", + "license": "Apache-2.0", + "optional": true + }, + "node_modules/bare-fs": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.1.5.tgz", + "integrity": "sha512-1zccWBMypln0jEE05LzZt+V/8y8AQsQQqxtklqaIyg5nu6OAYFhZxPXinJTSG+kU5qyNmeLgcn9AW7eHiCHVLA==", + "license": "Apache-2.0", "optional": true, - "requires": { - "tweetnacl": "^0.14.3" + "dependencies": { + "bare-events": "^2.5.4", + "bare-path": "^3.0.0", + "bare-stream": "^2.6.4" + }, + "engines": { + "bare": ">=1.16.0" + }, + "peerDependencies": { + "bare-buffer": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + } } }, - "binary-extensions": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.11.0.tgz", - "integrity": 
"sha1-RqoXUftqL5PuXmibsQh9SxTGwgU=", - "dev": true, - "optional": true + "node_modules/bare-os": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.6.1.tgz", + "integrity": "sha512-uaIjxokhFidJP+bmmvKSgiMzj2sV5GPHaZVAIktcxcpCyBFFWO+YlikVAdhmUo2vYFvFhOXIAlldqV29L8126g==", + "license": "Apache-2.0", + "optional": true, + "engines": { + "bare": ">=1.14.0" + } }, - "bl": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.2.tgz", - "integrity": "sha512-e8tQYnZodmebYDWGH7KMRvtzKXaJHx3BbilrgZCfvyLUYdKpK1t5PSPmpkny/SgiTSCnjfLW7v5rlONXVFkQEA==", - "requires": { - "readable-stream": "^2.3.5", - "safe-buffer": "^5.1.1" + "node_modules/bare-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bare-path/-/bare-path-3.0.0.tgz", + "integrity": "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-os": "^3.0.1" } }, - "block-stream": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", - "requires": { - "inherits": "~2.0.0" + "node_modules/bare-stream": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.6.5.tgz", + "integrity": "sha512-jSmxKJNJmHySi6hC42zlZnq00rga4jjxcgNZjY9N5WlOe/iOoGRtdwGsHzQv2RlH2KOYMwGUXhf2zXd32BA9RA==", + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "streamx": "^2.21.0" + }, + "peerDependencies": { + "bare-buffer": "*", + "bare-events": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + }, + "bare-events": { + "optional": true + } } }, - "boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=", - "dev": true + "node_modules/base64-js": { + "version": "1.5.1", + 
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] }, - "brace-expansion": { + "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { + "dev": true, + "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, - "braces": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", - "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", - "dev": true, - "optional": true, - "requires": { - "expand-range": "^1.8.1", - "preserve": "^0.2.0", - "repeat-element": "^1.1.2" - } - }, - "browser-stdout": { + "node_modules/browser-stdout": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", "dev": true }, - "buffer": { - "version": "4.9.1", - "resolved": "http://registry.npmjs.org/buffer/-/buffer-4.9.1.tgz", - "integrity": "sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg=", + "node_modules/browserslist": { + "version": "4.24.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", "dev": true, - "requires": { + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": 
"https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "dev": true, + "dependencies": { "base64-js": "^1.0.2", "ieee754": "^1.1.4", "isarray": "^1.0.0" } }, - "buffer-alloc": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", - "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", - "requires": { - "buffer-alloc-unsafe": "^1.1.0", - "buffer-fill": "^1.0.0" - } - }, - "buffer-alloc-unsafe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", - "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" - }, - "buffer-equal": { + "node_modules/buffer-equal": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-1.0.0.tgz", "integrity": "sha1-WWFrSYME1Var1GaWayLu2j7KX74=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.4.0" + } }, - "buffer-fill": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", - "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=" + "node_modules/cacache": { + "version": "19.0.1", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz", + "integrity": 
"sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==", + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^4.0.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^10.0.1", + "minipass": "^7.0.3", + "minipass-collect": "^2.0.1", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^7.0.2", + "ssri": "^12.0.0", + "tar": "^7.4.3", + "unique-filename": "^4.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } }, - "buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "node_modules/cacache/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/cacache/node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/cacache/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": 
"dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-12.0.1.tgz", + "integrity": "sha512-Yo9wGIQUaAfIbk+qY0X4cDQgCosecfBe3V9NSyeY4qPC2SAkbCS4Xj79VP8WOzitpJUZKc/wsRCYF5ariDIwkg==", + "license": "MIT", + "dependencies": { + "@types/http-cache-semantics": "^4.0.4", + "get-stream": "^9.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.4", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.1", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/cacheable-request/node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": 
"sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caching-transform": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", + "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", "dev": true, - "optional": true + "license": "MIT", + "dependencies": { + "hasha": "^5.0.0", + "make-dir": "^3.0.0", + "package-hash": "^4.0.0", + "write-file-atomic": "^3.0.0" + }, + "engines": { + "node": ">=8" + } }, - "cache-base": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", "dev": true, - "requires": { - "collection-visit": "^1.0.0", - "component-emitter": "^1.2.1", - "get-value": "^2.0.6", - "has-value": "^1.0.0", - "isobject": "^3.0.1", - "set-value": "^2.0.0", - "to-object-path": "^0.3.0", - "union-value": "^1.0.0", - "unset-value": "^1.0.0" + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", "dependencies": { - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" } }, - "camelcase": { + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/camelcase": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk=", - "dev": true - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" + "engines": { + "node": ">=0.10.0" } }, - "cheerio": { - "version": "1.0.0-rc.2", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.2.tgz", - "integrity": "sha1-S59TqBsn5NXawxwP/Qz6A8xoMNs=", + "node_modules/caniuse-lite": { + "version": "1.0.30001696", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001696.tgz", + "integrity": 
"sha512-pDCPkvzfa39ehJtJ+OwGT/2yvT2SbjfHhiIW2LWOAcMQ7BzwxT/XuyUp4OTOd0XFWA6BKw0JalnBHgSi5DGJBQ==", "dev": true, - "requires": { - "css-select": "~1.2.0", - "dom-serializer": "~0.1.0", - "entities": "~1.1.1", - "htmlparser2": "^3.9.1", - "lodash": "^4.15.0", - "parse5": "^3.0.1" - }, - "dependencies": { - "entities": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.1.tgz", - "integrity": "sha1-blwtClYhtdra7O+AuQ7ftc13cvA=", - "dev": true + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" }, - "htmlparser2": { - "version": "3.9.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.9.2.tgz", - "integrity": "sha1-G9+HrMoPP55T+k/M6w9LTLsAszg=", - "dev": true, - "requires": { - "domelementtype": "^1.3.0", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^2.0.2" - } + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } - } + ], + "license": "CC-BY-4.0" }, - "chokidar": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", - "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, - "optional": true, - "requires": { - "anymatch": "^1.3.0", - "async-each": "^1.0.0", - "fsevents": "^1.0.0", - "glob-parent": "^2.0.0", - "inherits": "^2.0.1", - "is-binary-path": "^1.0.0", - "is-glob": "^2.0.0", - "path-is-absolute": "^1.0.0", - "readdirp": "^2.0.0" + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/chalk/chalk?sponsor=1" } }, - "chownr": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.1.tgz", - "integrity": "sha512-j38EvO5+LHX84jlo6h4UzmOwi0UgW61WRyPtJz4qaadK5eY3BTS5TY/S1Stc3Uk2lIM6TPevAlULiEJwie860g==" - }, - "class-utils": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, - "requires": { - "arr-union": "^3.1.0", - "define-property": "^0.2.5", - "isobject": "^3.0.0", - "static-extend": "^0.1.1" + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" } }, - "clean-for-publish": { + "node_modules/chownr": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/clean-for-publish": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/clean-for-publish/-/clean-for-publish-1.0.4.tgz", "integrity": "sha1-KZMj50qzSwXSIHBsWd+B3QTKAYo=", "dev": true, - "requires": { + "dependencies": { "fs-extra": "^0.26.2", "glob": "~5.0.15", "yargs": "~3.29.0" }, + "bin": { + "clean-for-publish": "lib/cli.js" + } + }, + "node_modules/clean-for-publish/node_modules/fs-extra": { + "version": "0.26.7", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-0.26.7.tgz", + "integrity": "sha1-muH92UiXeY7at20JGM9C0MMYT6k=", + "dev": true, "dependencies": { - "fs-extra": { - "version": "0.26.7", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-0.26.7.tgz", - "integrity": "sha1-muH92UiXeY7at20JGM9C0MMYT6k=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^2.1.0", - "klaw": "^1.0.0", - "path-is-absolute": "^1.0.0", - "rimraf": "^2.2.8" - } - }, - "glob": { - "version": "5.0.15", - "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", - "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", - "dev": true, - "requires": { - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "2 || 3", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - } + "graceful-fs": "^4.1.2", + "jsonfile": "^2.1.0", + "klaw": "^1.0.0", + "path-is-absolute": "^1.0.0", + "rimraf": "^2.2.8" } }, - "cli": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cli/-/cli-1.0.1.tgz", - "integrity": "sha1-IoF1NPJL+klQw01TLUjsvGIbjBQ=", + "node_modules/clean-for-publish/node_modules/jsonfile": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", + "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", "dev": true, - 
"requires": { - "exit": "0.1.2", - "glob": "^7.1.1" + "optionalDependencies": { + "graceful-fs": "^4.1.6" } }, - "cliui": { + "node_modules/clean-for-publish/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/clean-for-publish/node_modules/rimraf/node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/cli": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cli/-/cli-1.0.1.tgz", + "integrity": "sha1-IoF1NPJL+klQw01TLUjsvGIbjBQ=", + "dev": true, + "dependencies": { + "exit": "0.1.2", + "glob": "^7.1.1" + }, + "engines": { + "node": ">=0.2.5" + } + }, + "node_modules/cli/node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + 
"minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cliui": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", "dev": true, - "requires": { + "dependencies": { "string-width": "^1.0.1", "strip-ansi": "^3.0.1", "wrap-ansi": "^2.0.0" } }, - "clone": { + "node_modules/cliui/node_modules/ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cliui/node_modules/is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "dependencies": { + "number-is-nan": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cliui/node_modules/string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "dependencies": { + "ansi-regex": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=", - "dev": true + "dev": true, + 
"engines": { + "node": ">=0.8" + } }, - "clone-buffer": { + "node_modules/clone-buffer": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/clone-buffer/-/clone-buffer-1.0.0.tgz", "integrity": "sha1-4+JbIHrE5wGvch4staFnksrD3Fg=", - "dev": true + "dev": true, + "engines": { + "node": ">= 0.10" + } }, - "clone-stats": { + "node_modules/clone-stats": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/clone-stats/-/clone-stats-1.0.0.tgz", "integrity": "sha1-s3gt/4u1R04Yuba/D9/ngvh3doA=", "dev": true }, - "cloneable-readable": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/cloneable-readable/-/cloneable-readable-1.1.2.tgz", - "integrity": "sha512-Bq6+4t+lbM8vhTs/Bef5c5AdEMtapp/iFb6+s4/Hh9MVTt8OLKH7ZOOZSCT+Ys7hsHvqv0GuMPJ1lnQJVHvxpg==", + "node_modules/cloneable-readable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/cloneable-readable/-/cloneable-readable-1.1.3.tgz", + "integrity": "sha512-2EF8zTQOxYq70Y4XKtorQupqF0m49MBz2/yf5Bj+MHjvpG3Hy7sImifnqD6UA+TKYxeSV+u6qqQPawN5UvnpKQ==", "dev": true, - "requires": { + "dependencies": { "inherits": "^2.0.1", "process-nextick-args": "^2.0.0", "readable-stream": "^2.3.5" } }, - "co": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", - "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=" + "node_modules/cloneable-readable/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } }, - "code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - 
"integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + "node_modules/cloneable-readable/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true }, - "collection-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "node_modules/cloneable-readable/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "requires": { - "map-visit": "^1.0.0", - "object-visit": "^1.0.0" + "dependencies": { + "safe-buffer": "~5.1.0" } }, - "colors": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.0.3.tgz", - "integrity": "sha1-BDP0TYCWgP3rYO0mDxsMJi6CpAs=", + "node_modules/code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", "dev": true, - "optional": true + "engines": { + "node": ">=0.10.0" + } }, - "combined-stream": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz", - "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=", - "requires": { - "delayed-stream": "~1.0.0" + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" } }, - "combyne": { + "node_modules/color-name": { 
+ "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/combyne": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/combyne/-/combyne-0.8.1.tgz", "integrity": "sha1-WJ3kcEXVcVbcHs4YXWTDidzLR9g=", "dev": true }, - "commander": { - "version": "2.18.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.18.0.tgz", - "integrity": "sha512-6CYPa+JP2ftfRU2qkDK+UTVeQYosOg/2GbcjIcKPHfinyOLPVGXu/ovN86RP49Re5ndJK1N0kuiidFFuepc4ZQ==", - "dev": true - }, - "component-emitter": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", - "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=", - "dev": true + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", + "dev": true, + "license": "MIT" }, - "concat-map": { + "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "dev": true, - "optional": true, - "requires": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true }, - "config-chain": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.12.tgz", - "integrity": 
"sha512-a1eOIcu8+7lUInge4Rpf/n4Krkf3Dd9lqhljRzII1/Zno/kRtUWnznPO3jOKBmTEktkt3fkxisUcivoj0ebzoA==", + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", "dev": true, - "requires": { + "dependencies": { "ini": "^1.3.4", "proto-list": "~1.2.1" } }, - "console-browserify": { + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/console-browserify": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.1.0.tgz", "integrity": "sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA=", "dev": true, - "requires": { + "dependencies": { "date-now": "^0.1.4" } }, - "console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=" - }, - "convert-source-map": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.6.0.tgz", - "integrity": "sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A==", + "node_modules/convert-source-map": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", "dev": true, - "requires": { + "dependencies": { "safe-buffer": "~5.1.1" } }, - "copy-descriptor": { - "version": "0.1.1", - "resolved": 
"https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", - "dev": true - }, - "core-js": { - "version": "2.5.7", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", - "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==", + "node_modules/convert-source-map/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "core-util-is": { + "node_modules/core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true }, - "coveralls": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/coveralls/-/coveralls-3.0.2.tgz", - "integrity": "sha512-Tv0LKe/MkBOilH2v7WBiTBdudg2ChfGbdXafc/s330djpF3zKOmuehTeRwjXWc7pzfj9FrDUTA7tEx6Div8NFw==", - "dev": true, - "requires": { - "growl": "~> 1.10.0", - "js-yaml": "^3.11.0", - "lcov-parse": "^0.0.10", - "log-driver": "^1.2.7", - "minimist": "^1.2.0", - "request": "^2.85.0" - }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", "dependencies": { - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true - }, - "growl": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": 
"sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", - "dev": true - }, - "js-yaml": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", - "dev": true, - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", - "dev": true - } - } - }, - "css-select": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-1.2.0.tgz", - "integrity": "sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg=", - "dev": true, - "requires": { - "boolbase": "~1.0.0", - "css-what": "2.1", - "domutils": "1.5.1", - "nth-check": "~1.0.1" + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" } }, - "css-what": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.0.tgz", - "integrity": "sha1-lGfQMsOM+u+58teVASUwYvh/ob0=", - "dev": true - }, - "cycle": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/cycle/-/cycle-1.0.3.tgz", - "integrity": "sha1-IegLK+hYD5i0aPN5QwZisEbDStI=", - "dev": true, - "optional": true - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "requires": { - "assert-plus": "^1.0.0" + "node_modules/cross-spawn/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" } 
}, - "date-now": { + "node_modules/date-now": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/date-now/-/date-now-0.1.4.tgz", "integrity": "sha1-6vQ5/U1ISK105cx9vvIAZyueNFs=", "dev": true }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, - "decamelize": { + "node_modules/decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.10.0" + } }, - "decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "dev": true + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": 
"sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" + "node_modules/default-require-extensions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.1.tgz", + "integrity": "sha512-eXTJmRbm2TIt9MgWTsOH1wEuhew6XGZcMeGKCtLedIg/NCsg1iBePXkceTdK4Fii7pzmN9tGsZhKzZ4h7O/fxw==", + "dev": true, + "license": "MIT", + "dependencies": { + "strip-bom": "^4.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, - "deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "license": "MIT", + "engines": { + "node": ">=10" + } }, - "define-properties": { + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", "dev": true, - "requires": { + "dependencies": { "object-keys": "^1.0.12" + }, + "engines": { + "node": ">= 
0.4" } }, - "define-property": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "node_modules/detect-libc": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz", + "integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==", "dev": true, - "requires": { - "is-descriptor": "^1.0.2", - "isobject": "^3.0.1" + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dev": true, "dependencies": { - "is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - 
"is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, - "requires": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - } - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true - } + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" } }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" - }, - "delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=" - }, - "detect-indent": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", - "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=", + "node_modules/duplexify/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": 
"sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "requires": { - "repeating": "^2.0.0" + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" } }, - "detect-libc": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", - "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=" - }, - "diff": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", + "node_modules/duplexify/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "dom-serializer": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.0.tgz", - "integrity": "sha1-BzxpdUbOB4DOI75KKOKT5AvDDII=", + "node_modules/duplexify/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "requires": { - "domelementtype": "~1.1.1", - "entities": "~1.1.1" - }, "dependencies": { - "domelementtype": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.1.3.tgz", - "integrity": "sha1-vSh3PiZCiBrsUVRJJCmcXNgiGFs=", - "dev": true - }, - "entities": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.1.tgz", - "integrity": "sha1-blwtClYhtdra7O+AuQ7ftc13cvA=", - "dev": true - 
} + "safe-buffer": "~5.1.0" } }, - "domelementtype": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.0.tgz", - "integrity": "sha1-sXrtguirWeUt2cGbF1bg/BhyBMI=", - "dev": true - }, - "domhandler": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.3.0.tgz", - "integrity": "sha1-LeWaCCLVAn+r/28DLCsloqir5zg=", - "dev": true, - "requires": { - "domelementtype": "1" + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.90", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.90.tgz", + "integrity": "sha512-C3PN4aydfW91Natdyd449Kw+BzhLmof6tzy5W1pFC5SpQxVXT+oyiyOG9AgYYSN9OdA/ik3YkCrpwqI8ug5Tug==", + "dev": true, + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" } }, - "domutils": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", - "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=", - "dev": true, - "requires": { - "dom-serializer": "0", - "domelementtype": "1" + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": 
"sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "engines": { + "node": ">=6" } }, - "duplexify": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.6.0.tgz", - "integrity": "sha512-fO3Di4tBKJpYTFHAxTU00BcfWMY9w24r/x21a6rZRbsD/ToUgGxsMbiGRmB7uVAXeGKXD9MwiLZa5E97EVgIRQ==", + "node_modules/err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "dev": true, - "requires": { - "end-of-stream": "^1.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.0.0", - "stream-shift": "^1.0.0" + "license": "MIT", + "engines": { + "node": ">= 0.4" } }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "optional": true, - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" } }, - "end-of-stream": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz", - "integrity": "sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==", - "requires": { - "once": "^1.4.0" + "node_modules/es-object-atoms": { + "version": "1.1.1", + 
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" } }, - "entities": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.0.0.tgz", - "integrity": "sha1-sph6o4ITR/zeZCsk/fyeT7cSvyY=", - "dev": true - }, - "es6-promise": { - "version": "4.2.5", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.5.tgz", - "integrity": "sha512-n6wvpdE43VFtJq+lUDYDBFUwV8TZbuGXLV4D6wKafg13ldznKsyEvatubnmUe31zcvelSzOHF+XbaT+Bl9ObDg==", + "node_modules/es6-error": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", + "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", "dev": true, - "optional": true + "license": "MIT" }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } }, - "escodegen": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.8.1.tgz", - "integrity": "sha1-WltTr0aTEQvrsIZ6o0MN07cKEBg=", + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": 
"sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true, - "requires": { - "esprima": "^2.7.1", - "estraverse": "^1.9.1", - "esutils": "^2.0.2", - "optionator": "^0.8.1", - "source-map": "~0.2.0" + "license": "MIT", + "engines": { + "node": ">=10" }, - "dependencies": { - "source-map": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.2.0.tgz", - "integrity": "sha1-2rc/vPwrqBm03gO9b26qSBZLP50=", - "dev": true, - "optional": true, - "requires": { - "amdefine": ">=0.0.4" - } - } + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "esprima": { - "version": "2.7.3", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-2.7.3.tgz", - "integrity": "sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE=", - "dev": true - }, - "estraverse": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-1.9.3.tgz", - "integrity": "sha1-r2fy3JIlgkFZUJJgkaQAXSnJu0Q=", - "dev": true - }, - "esutils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", - "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", - "dev": true - }, - "events": { + "node_modules/events": { "version": "1.1.1", - "resolved": "http://registry.npmjs.org/events/-/events-1.1.1.tgz", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", - "dev": true + "dev": true, + "engines": { + "node": ">=0.4.x" + } }, - "exit": { + "node_modules/exit": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=", - "dev": true - }, - "expand-brackets": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", - "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", "dev": true, - "optional": true, - "requires": { - "is-posix-bracket": "^0.1.0" + "engines": { + 
"node": ">= 0.8.0" } }, - "expand-range": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", - "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", - "dev": true, - "optional": true, - "requires": { - "fill-range": "^2.1.0" - } + "node_modules/exponential-backoff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.1.tgz", + "integrity": "sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==" }, - "extend": { + "node_modules/extend": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true }, - "extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dev": true, - "requires": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" + "node_modules/fast-fifo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", + "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==", + "license": "MIT" + }, + "node_modules/fdir": { + "version": "6.4.4", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", + "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "license": "MIT", + "peerDependencies": { + "picomatch": "^3 || ^4" }, - "dependencies": { - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": 
"sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dev": true, - "requires": { - "is-plain-object": "^2.0.4" - } + "peerDependenciesMeta": { + "picomatch": { + "optional": true } } }, - "extglob": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", - "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", + "node_modules/find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", "dev": true, - "optional": true, - "requires": { - "is-extglob": "^1.0.0" + "license": "MIT", + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" } }, - "extract-zip": { - "version": "1.6.7", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.6.7.tgz", - "integrity": "sha1-qEC0uK9kAyZMjbV/Txp0Mz74H+k=", + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, - "optional": true, - "requires": { - "concat-stream": "1.6.2", - "debug": "2.6.9", - "mkdirp": "0.5.1", - "yauzl": "2.4.1" + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" } }, - "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" - }, - "eyes": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eyes/-/eyes-0.1.8.tgz", - "integrity": "sha1-Ys8SAjTGg3hdkCNIqADvPgzCC8A=", - "dev": true, - "optional": true 
- }, - "fast-deep-equal": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", - "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=" - }, - "fast-json-stable-stringify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", - "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=" - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true - }, - "fd-slicer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", - "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", "dev": true, - "optional": true, - "requires": { - "pend": "~1.2.0" + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" } }, - "filename-regex": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", - "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", - "dev": true, - "optional": true - }, - "fill-range": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", - "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "node_modules/flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", "dev": true, - "optional": true, - "requires": { - "is-number": "^2.1.0", - "isobject": "^2.0.0", - "randomatic": "^3.0.0", - 
"repeat-element": "^1.1.2", - "repeat-string": "^1.5.2" + "dependencies": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" } }, - "flush-write-stream": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.0.3.tgz", - "integrity": "sha512-calZMC10u0FMUqoiunI2AiGIIUtUIvifNwkHhNupZH4cbNnW1Itkoh/Nf5HFYmDrwWPjrUxpkZT0KhuCq0jmGw==", + "node_modules/flush-write-stream/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.4" + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" } }, - "for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "node_modules/flush-write-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "for-own": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", - "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", + "node_modules/flush-write-stream/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "optional": true, - "requires": { - "for-in": "^1.0.1" + "dependencies": { + "safe-buffer": 
"~5.1.0" } }, - "foreachasync": { + "node_modules/for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/foreachasync": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz", "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY=", "dev": true }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" + "node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } }, - "form-data": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz", - "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "1.0.6", - "mime-types": "^2.1.12" + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "fragment-cache": { - "version": "0.2.1", - 
"resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", - "dev": true, - "requires": { - "map-cache": "^0.2.2" + "node_modules/form-data-encoder": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-4.0.2.tgz", + "integrity": "sha512-KQVhvhK8ZkWzxKxOr56CPulAhH3dobtuQ4+hNQ+HekH/Wp5gSOafqRAeTphQUJAIk0GBvHZgJ2ZGRWd5kphMuw==", + "license": "MIT", + "engines": { + "node": ">= 18" } }, - "fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + "node_modules/fromentries": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz", + "integrity": "sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" }, - "fs-extra": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.0.tgz", - "integrity": "sha512-EglNDLRpmaTWiD/qraZn6HREAEAHJcJOmxNEYwq6xeMKnVMAy3GUcFB+wXt2C6k4CNvB/mP1y/U3dzvKKj5OtQ==", - "requires": { + "node_modules/fs-extra": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "dependencies": { "graceful-fs": "^4.1.2", "jsonfile": "^4.0.0", "universalify": "^0.1.0" }, - "dependencies": { - "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": 
"sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "requires": { - "graceful-fs": "^4.1.6" - } - } - } - }, - "fs-minipass": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.5.tgz", - "integrity": "sha512-JhBl0skXjUPCFH7x6x61gQxrKyXsxB5gcgePLZCwfyCGGsTISMoIeObbrvVeP6Xmyaudw4TT43qV2Gz+iyd2oQ==", - "requires": { - "minipass": "^2.2.1" + "engines": { + "node": ">=6 <7 || >=8" } }, - "fs-mkdirp-stream": { + "node_modules/fs-mkdirp-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-mkdirp-stream/-/fs-mkdirp-stream-1.0.0.tgz", "integrity": "sha1-C3gV/DIBxqaeFNuYzgmMFpNSWes=", "dev": true, - "requires": { + "dependencies": { "graceful-fs": "^4.1.11", "through2": "^2.0.3" + }, + "engines": { + "node": ">= 0.10" } }, - "fs-readdir-recursive": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz", - "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==", - "dev": true - }, - "fs.realpath": { + "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true }, - "fsevents": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", - "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true, - "optional": true, - "requires": { - "nan": "^2.9.2", - "node-pre-gyp": "^0.10.0" - }, - "dependencies": { - "abbrev": { - "version": "1.1.1", - "bundled": true, - "dev": true, - 
"optional": true + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": 
"sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + "license": "MIT", + "dependencies": { + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-stream/node_modules/is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "dependencies": { + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "2 || 3", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==", + "dev": true, + "dependencies": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + } + }, + "node_modules/glob-stream": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/glob-stream/-/glob-stream-6.1.0.tgz", + "integrity": 
"sha512-uMbLGAP3S2aDOHUDfdoYcdIePUCfysbAd0IAoWVZbeGU/oNQ8asHVSshLDJUPWxfzj8zsCG7/XeHPHTtow0nsw==", + "dev": true, + "dependencies": { + "extend": "^3.0.0", + "glob": "^7.1.1", + "glob-parent": "^3.1.0", + "is-negated-glob": "^1.0.0", + "ordered-read-streams": "^1.0.0", + "pumpify": "^1.3.5", + "readable-stream": "^2.1.5", + "remove-trailing-separator": "^1.0.1", + "to-absolute-glob": "^2.0.0", + "unique-stream": "^2.0.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/glob-stream/node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-stream/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/glob-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/glob-stream/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "14.4.7", + "resolved": "https://registry.npmjs.org/got/-/got-14.4.7.tgz", + "integrity": "sha512-DI8zV1231tqiGzOiOzQWDhsBmncFW7oQDH6Zgy6pDPrqJuVZMtoSgPLLsBZQj8Jg4JFfwoOsDA8NGtLQLnIx2g==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^7.0.1", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^12.0.1", + "decompress-response": "^6.0.0", + "form-data-encoder": "^4.0.2", + "http2-wrapper": "^2.2.1", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^4.0.1", + "responselike": "^3.0.0", + "type-fest": "^4.26.1" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" 
+ }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.9", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", + "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasha": { + "version": "5.2.2", + 
"resolved": "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz", + "integrity": "sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-stream": "^2.0.0", + "type-fest": "^0.8.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + 
"node": ">= 14" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "license": "MIT", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==", + "dev": true + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + 
"dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true + }, + "node_modules/invert-kv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", + "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ip-address": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-9.0.5.tgz", + "integrity": "sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==", + "license": "MIT", + "dependencies": { + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/ip-address/node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "license": "BSD-3-Clause" + }, + "node_modules/is-absolute": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", + "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==", + "dev": true, + 
"dependencies": { + "is-relative": "^1.0.0", + "is-windows": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-arguments": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz", + "integrity": 
"sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-proto": "^1.0.0", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-negated-glob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-negated-glob/-/is-negated-glob-1.0.0.tgz", + "integrity": "sha1-aRC8pdqMleeEtXUbl2z1oQ/uNtI=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-relative": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", + "integrity": 
"sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==", + "dev": true, + "dependencies": { + "is-unc-path": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "node_modules/is-unc-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz", + "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==", + "dev": true, + "dependencies": { + "unc-path-regex": "^0.1.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=", + "dev": true + }, + "node_modules/is-valid-glob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-valid-glob/-/is-valid-glob-1.0.0.tgz", + "integrity": "sha1-Kb8+/3Ab4tTTFdusw5vDn+j2Aao=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-hook": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz", + "integrity": "sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "append-transform": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": 
"6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-processinfo": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.3.tgz", + "integrity": "sha512-NkwHbo3E00oybX6NGJi6ar0B29vxyvNwoC7eJ4G4Yq28UfY758Hgn/heV8VRFhevPED4LXfFz0DQ8z/0kw9zMg==", + "dev": true, + "license": "ISC", + "dependencies": { + "archy": "^1.0.0", + "cross-spawn": "^7.0.3", + "istanbul-lib-coverage": "^3.2.0", + "p-map": "^3.0.0", + "rimraf": "^3.0.0", + "uuid": "^8.3.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-processinfo/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": 
"sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/istanbul-lib-report/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { 
+ "node": ">=0.10.0" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jmespath": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", + "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==", + "dev": true, + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/js-beautify": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.5.10.tgz", + "integrity": "sha1-TZU3FwJpk0SlFsomv1nwonu3Vxk=", + "dev": true, + "dependencies": { + "config-chain": "~1.1.5", + "mkdirp": "~0.5.0", + "nopt": "~3.0.1" + }, + "bin": { + "css-beautify": "js/bin/css-beautify.js", + "html-beautify": "js/bin/html-beautify.js", + "js-beautify": "js/bin/js-beautify.js" + } + }, + "node_modules/js-beautify/node_modules/nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + } + }, + "node_modules/js-tokens": { + 
"version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/js-yaml/node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==", + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jshint": { + "version": "2.13.4", + "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.13.4.tgz", + "integrity": "sha512-HO3bosL84b2qWqI0q+kpT/OpRJwo0R4ivgmxaO848+bo10rc50SkPnrtwSFXttW0ym4np8jbJvLwk5NziB7jIw==", + "dev": true, + "dependencies": { + "cli": "~1.0.0", + "console-browserify": "1.1.x", + "exit": "0.1.x", + "htmlparser2": "3.8.x", + "lodash": 
"~4.17.21", + "minimatch": "~3.0.2", + "strip-json-comments": "1.0.x" + }, + "bin": { + "jshint": "bin/jshint" + } + }, + "node_modules/jshint/node_modules/dom-serializer": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "dev": true, + "dependencies": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + } + }, + "node_modules/jshint/node_modules/dom-serializer/node_modules/domelementtype": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", + "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/jshint/node_modules/dom-serializer/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "dev": true, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/jshint/node_modules/domelementtype": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", + "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", + "dev": true + }, + "node_modules/jshint/node_modules/domhandler": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.3.0.tgz", + "integrity": "sha1-LeWaCCLVAn+r/28DLCsloqir5zg=", + "dev": true, + "dependencies": { + "domelementtype": "1" + } + }, + "node_modules/jshint/node_modules/domutils": { + "version": "1.5.1", + "resolved": 
"https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", + "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=", + "dev": true, + "dependencies": { + "dom-serializer": "0", + "domelementtype": "1" + } + }, + "node_modules/jshint/node_modules/entities": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.0.0.tgz", + "integrity": "sha1-sph6o4ITR/zeZCsk/fyeT7cSvyY=", + "dev": true + }, + "node_modules/jshint/node_modules/htmlparser2": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.8.3.tgz", + "integrity": "sha1-mWwosZFRaovoZQGn15dX5ccMEGg=", + "dev": true, + "dependencies": { + "domelementtype": "1", + "domhandler": "2.3", + "domutils": "1.5", + "entities": "1.0", + "readable-stream": "1.1" + } + }, + "node_modules/jshint/node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "node_modules/jshint/node_modules/minimatch": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.8.tgz", + "integrity": "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jshint/node_modules/readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "node_modules/jshint/node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + }, + 
"node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/klaw": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-1.3.1.tgz", + "integrity": "sha1-QIhDO0azsbolnXh4XY6W9zugJDk=", + "dev": true, + "optionalDependencies": { + "graceful-fs": "^4.1.9" + } + }, + "node_modules/lazystream": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.1.tgz", + "integrity": "sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==", + "dev": true, + "dependencies": { + "readable-stream": "^2.0.5" + }, + "engines": { + "node": ">= 0.6.3" + 
} + }, + "node_modules/lazystream/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/lazystream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/lazystream/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/lcid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", + "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", + "dev": true, + "dependencies": { + "invert-kv": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lcov-result-merger": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lcov-result-merger/-/lcov-result-merger-3.3.0.tgz", + "integrity": "sha512-Krg9p24jGaIT93RBMA8b5qLHDEiAXTavaTiEdMAZaJS93PsBKIcg/89cw/8rgeSfRuQX+I9x7h73SHFjCZ6cHg==", + "dev": true, + "dependencies": { + "through2": "^2.0.3", + "vinyl": "^2.1.0", + "vinyl-fs": "^3.0.2", + "yargs": "^16.2.0" + }, + "bin": { + "lcov-result-merger": "bin/lcov-result-merger.js" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/lcov-result-merger/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/lcov-result-merger/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/lcov-result-merger/node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/lcov-result-merger/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/lead": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lead/-/lead-1.0.0.tgz", + "integrity": "sha1-bxT5mje+Op3XhPVJVpDlkDRm7kI=", + "dev": true, + "dependencies": { + "flush-write-stream": "^1.0.2" + }, + "engines": { + 
"node": ">= 0.10" + } + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.flattendeep": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", + "integrity": "sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": 
"sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC" + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/make-fetch-happen": { + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz", + "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==", + "license": "ISC", + "dependencies": { + "@npmcli/agent": "^3.0.0", + "cacache": "^19.0.1", + "http-cache-semantics": "^4.1.1", + "minipass": "^7.0.2", + "minipass-fetch": "^4.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^1.0.0", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1", + "ssri": "^12.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/make-fetch-happen/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-collect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", + "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": ">=16 
|| 14 >=14.17" + } + }, + "node_modules/minipass-collect/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minipass-fetch": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz", + "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==", + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^3.0.1" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/minipass-fetch/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minipass-flush": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized": { + "version": "1.0.3", 
+ "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/minizlib/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mocha": { + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.4.0.tgz", + "integrity": "sha512-O6oi5Y9G6uu8f9iqXR6iKNLWHLRex3PKbmHynfpmUnMJJGrdgXh8ZmS85Ei5KR2Gnl+/gQ9s+Ktv5CqKybNw4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "browser-stdout": "^1.3.1", + "chokidar": "^4.0.1", + "debug": "^4.3.5", + "diff": "^7.0.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^10.4.5", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "picocolors": "^1.1.1", + "serialize-javascript": "^6.0.2", + "strip-json-comments": 
"^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/mocha/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/mocha/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/mocha/node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/mocha/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/glob": { + "version": "10.4.5", + "resolved": 
"https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mocha/node_modules/glob/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mocha/node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/mocha/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/minimatch": { + "version": "5.1.6", + "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mocha/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mocha/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, 
+ "node_modules/mocha/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/mocha/node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/mocha/node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nan": { + 
"version": "2.23.1", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.23.1.tgz", + "integrity": "sha512-r7bBUGKzlqk8oPBDYxt6Z0aEdF1G1rwlMcLk8LCOMbOzf0mG+JUfUzG4fIMWwHWP0iyaLWEQZJmtB7nOHEm/qw==", + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-gyp": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.2.0.tgz", + "integrity": "sha512-T0S1zqskVUSxcsSTkAsLc7xCycrRYmtDHadDinzocrThjyQCn5kMlEBSj6H4qDbgsIOSLmmlRIeb0lZXj+UArA==", + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^14.0.3", + "nopt": "^8.0.0", + "proc-log": "^5.0.0", + "semver": "^7.3.5", + "tar": "^7.4.3", + "tinyglobby": "^0.2.12", + "which": "^5.0.0" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/node-preload": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz", + "integrity": "sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "process-on-spawn": "^1.0.0" + }, + 
"engines": { + "node": ">=8" + } + }, + "node_modules/node-releases": { + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true, + "license": "MIT" + }, + "node_modules/nopt": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", + "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", + "license": "ISC", + "dependencies": { + "abbrev": "^3.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/nopt/node_modules/abbrev": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", + "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==", + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "dependencies": { + "remove-trailing-separator": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", + "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/now-and-later": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/now-and-later/-/now-and-later-2.0.1.tgz", + "integrity": 
"sha512-KGvQ0cB70AQfg107Xvs/Fbu+dGmZoTRJp2TaPwcwQm3/7PteUyN2BCgk8KBMPGBUXZdVwyWS8fDCGFygBm19UQ==", + "dev": true, + "dependencies": { + "once": "^1.3.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nyc": { + "version": "17.1.0", + "resolved": "https://registry.npmjs.org/nyc/-/nyc-17.1.0.tgz", + "integrity": "sha512-U42vQ4czpKa0QdI1hu950XuNhYqgoM+ZF1HT+VuUHL9hPfDPVvNQyltmMqdE9bUHMVa+8yNbc3QKTj8zQhlVxQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "caching-transform": "^4.0.0", + "convert-source-map": "^1.7.0", + "decamelize": "^1.2.0", + "find-cache-dir": "^3.2.0", + "find-up": "^4.1.0", + "foreground-child": "^3.3.0", + "get-package-type": "^0.1.0", + "glob": "^7.1.6", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-hook": "^3.0.0", + "istanbul-lib-instrument": "^6.0.2", + "istanbul-lib-processinfo": "^2.0.2", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.0.2", + "make-dir": "^3.0.0", + "node-preload": "^0.2.1", + "p-map": "^3.0.0", + "process-on-spawn": "^1.0.0", + "resolve-from": "^5.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "spawn-wrap": "^2.0.0", + "test-exclude": "^6.0.0", + "yargs": "^15.0.2" + }, + "bin": { + "nyc": "bin/nyc.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/nyc/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/nyc/node_modules/cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "node_modules/nyc/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/nyc/node_modules/p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": 
"sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/nyc/node_modules/yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^18.1.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nyc/node_modules/yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/ordered-read-streams": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ordered-read-streams/-/ordered-read-streams-1.0.1.tgz", + "integrity": "sha1-d8DLN8QVJdZBZtmQ/61+xqDhNj4=", + "dev": true, + "dependencies": { + "readable-stream": "^2.0.1" + } + }, + "node_modules/ordered-read-streams/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/ordered-read-streams/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/ordered-read-streams/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/os-locale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", + "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", + "dev": true, + "dependencies": { + "lcid": "^1.0.0" + }, + "engines": { + 
"node": ">=0.10.0" + } + }, + "node_modules/p-cancelable": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-4.0.1.tgz", + "integrity": "sha512-wBowNApzd45EIKdO1LaU+LrMBwAcjfPaYtVzV3lmfM3gf8Z4CHZsiIqlM8TZZ8okYvh5A1cP6gTfCRQtwUpaUg==", + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-map": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.3.tgz", + "integrity": "sha512-VkndIv2fIB99swvQoA65bm+fsmt6UNdGeIB0oxBs+WhAhdh08QA04JXpI7rbB9r08/nkbysKoya9rtDERYOYMA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-hash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz", + "integrity": 
"sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "graceful-fs": "^4.1.15", + "hasha": "^5.0.0", + "lodash.flattendeep": "^4.4.0", + "release-zalgo": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0" + }, + "node_modules/path-dirname": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", + "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=", + "dev": true + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + 
"lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/proc-log": { + "version": "5.0.0", + 
"resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz", + "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==", + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "node_modules/process-on-spawn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.1.0.tgz", + "integrity": "sha512-JOnOPQ/8TZgjs1JIH/m9ni7FfimjNa/PRx7y/Wb5qdItsnhO0jE4AT7fC0HjC28DUQWDr50dwSYZLdRMlqDq3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "fromentries": "^1.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "license": "MIT", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", + "dev": true + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": 
"sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "dev": true, + "dependencies": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "node_modules/pumpify/node_modules/pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "dev": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "deprecated": "The querystring API is considered Legacy. new code should use the URLSearchParams API instead.", + "dev": true, + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": 
"https://paulmillr.com/funding/" + } + }, + "node_modules/release-zalgo": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", + "integrity": "sha512-gUAyHVHPPC5wdqX/LG4LWtRYtgjxyX78oanFNTMMyFEfOqdC54s3eE82imuWKbOeqYht2CrNf64Qb8vgmmtZGA==", + "dev": true, + "license": "ISC", + "dependencies": { + "es6-error": "^4.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/remove-bom-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remove-bom-buffer/-/remove-bom-buffer-3.0.0.tgz", + "integrity": "sha512-8v2rWhaakv18qcvNeli2mZ/TMTL2nEyAKRvzo1WtnZBl15SHyEhrCu2/xKlJyUFKHiHgfXIyuY6g2dObJJycXQ==", + "dev": true, + "dependencies": { + "is-buffer": "^1.1.5", + "is-utf8": "^0.2.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/remove-bom-stream": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/remove-bom-stream/-/remove-bom-stream-1.2.0.tgz", + "integrity": "sha1-BfGlk/FuQuH7kOv1nejlaVJflSM=", + "dev": true, + "dependencies": { + "remove-bom-buffer": "^3.0.0", + "safe-buffer": "^5.1.0", + "through2": "^2.0.3" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": true + }, + "node_modules/replace-ext": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.1.tgz", + "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==", + "dev": true, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": 
"sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true, + "license": "ISC" + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-options": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/resolve-options/-/resolve-options-1.1.0.tgz", + "integrity": "sha1-MrueOcBtZzONyTeMDW1gdFZq0TE=", + "dev": true, + "dependencies": { + "value-or-function": "^3.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "license": "MIT", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": 
"sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "optional": true + }, + "node_modules/sax": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==", + "dev": true, + "license": "ISC" + }, + "node_modules/semver": { + "version": "7.7.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.0.tgz", + "integrity": "sha512-DrfFnPzblFmNrIZzg5RzHegbiRWg7KMR7btwi2yjHwx06zsUbO5g613sVwEV7FTwmzJu+Io0lJe2GJ3LxqpvBQ==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + 
"get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.4", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.4.tgz", + "integrity": "sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==", + "license": "MIT", + "dependencies": { + "ip-address": "^9.0.5", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": 
"sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/spawn-wrap": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", + "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^2.0.0", + "is-windows": "^1.0.2", + "make-dir": "^3.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "which": "^2.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/spawn-wrap/node_modules/foreground-child": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", + "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/spawn-wrap/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "node_modules/ssri": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz", + "integrity": 
"sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==", + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/ssri/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", + "dev": true + }, + "node_modules/streamx": { + "version": "2.22.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.22.0.tgz", + "integrity": "sha512-sLh1evHOzBy/iWRiR6d1zRcLao4gGZr3C1kzNz4fopCOKJb6xD9ub8Mpi9Mr1R6id5o43S+d93fI48UC5uM9aw==", + "license": "MIT", + "dependencies": { + "fast-fifo": "^1.3.2", + "text-decoder": "^1.1.0" + }, + "optionalDependencies": { + "bare-events": "^2.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + 
"is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-1.0.4.tgz", + "integrity": "sha1-HhX7ysl9Pumb8tc7TGVrCCu6+5E=", + "dev": true, + "bin": { + "strip-json-comments": "cli.js" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/tar": { + "version": "7.4.3", + "resolved": 
"https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "license": "ISC", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/tar-fs": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.1.tgz", + "integrity": "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg==", + "dependencies": { + "pump": "^3.0.0", + "tar-stream": "^3.1.5" + }, + "optionalDependencies": { + "bare-fs": "^4.0.1", + "bare-path": "^3.0.0" + } + }, + "node_modules/tar-stream": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", + "license": "MIT", + "dependencies": { + "b4a": "^1.6.4", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tar/node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "license": "MIT", + "bin": { + "mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/text-decoder": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.3.tgz", + "integrity": "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA==", + "license": "Apache-2.0", + "dependencies": { + "b4a": "^1.6.4" + } + }, + "node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/through2-filter": { 
+ "version": "3.0.0", + "resolved": "https://registry.npmjs.org/through2-filter/-/through2-filter-3.0.0.tgz", + "integrity": "sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA==", + "dev": true, + "dependencies": { + "through2": "~2.0.0", + "xtend": "~4.0.0" + } + }, + "node_modules/through2/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/through2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/through2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", + "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "license": "MIT", + "dependencies": { + "fdir": "^6.4.4", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/to-absolute-glob": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/to-absolute-glob/-/to-absolute-glob-2.0.2.tgz", + "integrity": "sha1-GGX0PZ50sIItufFFt4z/fQ98hJs=", + "dev": true, + "dependencies": { + "is-absolute": "^1.0.0", + "is-negated-glob": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/to-through": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-through/-/to-through-2.0.0.tgz", + "integrity": "sha1-/JKtq6ByZHvAtn1rA2ZKoZUJOvY=", + "dev": true, + "dependencies": { + "through2": "^2.0.3" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=" + }, + "node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/unc-path-regex": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz", + "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/unique-filename": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz", + "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==", + "license": "ISC", + "dependencies": { + "unique-slug": 
"^5.0.0" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/unique-slug": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz", + "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==", + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/unique-stream": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/unique-stream/-/unique-stream-2.3.1.tgz", + "integrity": "sha512-2nY4TnBE70yoxHkDli7DMazpWiP7xMdCYqU2nBRO0UB+ZpEkGsSija7MvmvnZFUeC+mrgiUfcHSr3LmRFIg4+A==", + "dev": true, + "dependencies": { + "json-stable-stringify-without-jsonify": "^1.0.1", + "through2-filter": "^3.0.0" + } + }, + "node_modules/universalify": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz", + "integrity": "sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" }, - "ansi-regex": { - "version": "2.1.1", - "bundled": true, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/url": { + "version": 
"0.10.3", + "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", + "integrity": "sha1-Ah5NnHcF8hu/N9A861h2dAJ3TGQ=", + "dev": true, + "dependencies": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "node_modules/url/node_modules/punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", + "dev": true + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "node_modules/uuid": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==", + "dev": true, + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-or-function": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/value-or-function/-/value-or-function-3.0.0.tgz", + "integrity": "sha1-HCQ6ULWVwb5Up1S/7OhWO5/42BM=", + "dev": true, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vinyl": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/vinyl/-/vinyl-2.2.1.tgz", + "integrity": "sha512-LII3bXRFBZLlezoG5FfZVcXflZgWP/4dCwKtxd5ky9+LOtM4CS3bIRQsmR1KMnMW07jpE8fqR2lcxPZ+8sJIcw==", + "dev": true, + "dependencies": { + "clone": "^2.1.1", + "clone-buffer": "^1.0.0", + "clone-stats": 
"^1.0.0", + "cloneable-readable": "^1.0.0", + "remove-trailing-separator": "^1.0.1", + "replace-ext": "^1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vinyl-fs": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/vinyl-fs/-/vinyl-fs-3.0.3.tgz", + "integrity": "sha512-vIu34EkyNyJxmP0jscNzWBSygh7VWhqun6RmqVfXePrOwi9lhvRs//dOaGOTRUQr4tx7/zd26Tk5WeSVZitgng==", + "dev": true, + "dependencies": { + "fs-mkdirp-stream": "^1.0.0", + "glob-stream": "^6.1.0", + "graceful-fs": "^4.0.0", + "is-valid-glob": "^1.0.0", + "lazystream": "^1.0.0", + "lead": "^1.0.0", + "object.assign": "^4.0.4", + "pumpify": "^1.3.5", + "readable-stream": "^2.3.3", + "remove-bom-buffer": "^3.0.0", + "remove-bom-stream": "^1.2.0", + "resolve-options": "^1.1.0", + "through2": "^2.0.0", + "to-through": "^2.0.0", + "value-or-function": "^3.0.0", + "vinyl": "^2.0.0", + "vinyl-sourcemap": "^1.1.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vinyl-fs/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/vinyl-fs/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "node_modules/vinyl-fs/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": 
"sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/vinyl-sourcemap": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/vinyl-sourcemap/-/vinyl-sourcemap-1.1.0.tgz", + "integrity": "sha1-kqgAWTo4cDqM2xHYswCtS+Y7PhY=", + "dev": true, + "dependencies": { + "append-buffer": "^1.0.2", + "convert-source-map": "^1.5.0", + "graceful-fs": "^4.1.6", + "normalize-path": "^2.1.1", + "now-and-later": "^2.0.0", + "remove-bom-buffer": "^3.0.0", + "vinyl": "^2.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/walk": { + "version": "2.3.15", + "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.15.tgz", + "integrity": "sha512-4eRTBZljBfIISK1Vnt69Gvr2w/wc3U6Vtrw7qiN5iqYJPH7LElcYh/iU4XWhdCy2dZqv1ToMyYlybDylfG/5Vg==", + "dev": true, + "dependencies": { + "foreachasync": "^3.0.0" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha1-lmRU6HZUYuN2RNNib2dCzotwll0=", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz", + "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==", + "license": "ISC", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": 
"sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which/node_modules/isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + "license": "ISC", + "engines": { + "node": ">=16" + } + }, + "node_modules/window-size": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", + "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", + "dev": true, + "bin": { + "window-size": "cli.js" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/workerpool": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", + "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "dev": true, + "dependencies": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi/node_modules/is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "dependencies": { + "number-is-nan": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "dependencies": { + "ansi-regex": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": 
"sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/xml2js": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz", + "integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==", + "dev": true, + "license": "MIT", + "dependencies": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", + "integrity": "sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==", + "dev": true + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/yargs": { + 
"version": "3.29.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.29.0.tgz", + "integrity": "sha1-GquWYOrnnYuPZ1vK7qtu40ws9pw=", + "dev": true, + "dependencies": { + "camelcase": "^1.2.1", + "cliui": "^3.0.3", + "decamelize": "^1.0.0", + "os-locale": "^1.4.0", + "window-size": "^0.1.2", + "y18n": "^3.2.0" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs-unparser/node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + 
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + }, + "dependencies": { + "@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + } + }, + "@babel/compat-data": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.5.tgz", + "integrity": "sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg==", + "dev": true + }, + "@babel/core": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.7.tgz", + "integrity": "sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA==", + "dev": true, + "peer": true, + "requires": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/helper-compilation-targets": "^7.26.5", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.7", + "@babel/parser": "^7.26.7", + "@babel/template": "^7.25.9", + "@babel/traverse": "^7.26.7", 
+ "@babel/types": "^7.26.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "dependencies": { + "convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true }, - "aproba": { - "version": "1.2.0", - "bundled": true, + "semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true + } + } + }, + "@babel/generator": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.5.tgz", + "integrity": "sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw==", + "dev": true, + "requires": { + "@babel/parser": "^7.26.5", + "@babel/types": "^7.26.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz", + "integrity": "sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==", + "dev": true, + "requires": { + "@babel/compat-data": "^7.26.5", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "dependencies": { + "lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, - "optional": true + "requires": { + "yallist": 
"^3.0.2" + } }, - "are-we-there-yet": { - "version": "1.1.4", - "bundled": true, - "dev": true, - "optional": true, + "semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true + }, + "yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + } + } + }, + "@babel/helper-module-imports": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", + "dev": true, + "requires": { + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + } + }, + "@babel/helper-module-transforms": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" + } + }, + "@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true + }, + "@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": 
"sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true + }, + "@babel/helper-validator-option": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", + "dev": true + }, + "@babel/helpers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.1.tgz", + "integrity": "sha512-FCvFTm0sWV8Fxhpp2McP5/W53GPllQ9QeQ7SiqGWjMf/LVG07lFa5+pgK05IRhVwtvafT22KF+ZSnM9I545CvQ==", + "dev": true, + "requires": { + "@babel/template": "^7.27.1", + "@babel/types": "^7.27.1" + } + }, + "@babel/parser": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.2.tgz", + "integrity": "sha512-QYLs8299NA7WM/bZAdp+CviYYkVoYXlDW2rzliy3chxd1PQjej7JORuMJDJXJUb9g0TT+B99EwaVLKmX+sPXWw==", + "dev": true, + "requires": { + "@babel/types": "^7.27.1" + } + }, + "@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + } + }, + "@babel/traverse": { + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.7.tgz", + "integrity": "sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/parser": "^7.26.7", + "@babel/template": "^7.25.9", + "@babel/types": "^7.26.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + } + }, + "@babel/types": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/types/-/types-7.27.1.tgz", + "integrity": "sha512-+EzkxvLNfiUeKMgy/3luqfsCWFRXLb7U6wNQTk60tovuckwB15B191tJWvpp4HjiQWdJkCxO3Wbvc6jlk3Xb2Q==", + "dev": true, + "requires": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + } + }, + "@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "requires": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==" + }, + "ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==" + }, + "emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + } + }, + "strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": 
"sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "requires": { + "ansi-regex": "^6.0.1" + } + }, + "wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "requires": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + } + } + } + }, + "@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "requires": { + "minipass": "^7.0.4" + }, + "dependencies": { + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + } + } + }, + "@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "dependencies": { + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + } + } + }, + "@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": 
"sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true + }, + "@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "dev": true, + "requires": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true + }, + "@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true + }, + "@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true + }, + "@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "@mapbox/node-pre-gyp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-2.0.0.tgz", + "integrity": 
"sha512-llMXd39jtP0HpQLVI37Bf1m2ADlEb35GYSh1SDSLsBhR+5iCxiNGlT31yqbNtVHygHAtMy6dWFERpU2JgufhPg==", + "requires": { + "consola": "^3.2.3", + "detect-libc": "^2.0.0", + "https-proxy-agent": "^7.0.5", + "node-fetch": "^2.6.7", + "nopt": "^8.0.0", + "semver": "^7.5.3", + "tar": "^7.4.0" + } + }, + "@npmcli/agent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/agent/-/agent-3.0.0.tgz", + "integrity": "sha512-S79NdEgDQd/NGCay6TCoVzXSj74skRZIKJcpJjC5lOq34SZzyI6MqtiiWoiVWoVrTcGjNeC4ipbh1VIHlpfF5Q==", + "requires": { + "agent-base": "^7.1.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.1", + "lru-cache": "^10.0.1", + "socks-proxy-agent": "^8.0.3" + } + }, + "@npmcli/fs": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-4.0.0.tgz", + "integrity": "sha512-/xGlezI6xfGO9NwuJlnwz/K14qD1kCSAGtacBHnGzeAIuJGazcp45KP5NuyARXoKb7cwulAGWVsbeSxdG/cb0Q==", + "requires": { + "semver": "^7.3.5" + } + }, + "@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "optional": true + }, + "@sec-ant/readable-stream": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", + "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==" + }, + "@sindresorhus/is": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-7.0.1.tgz", + "integrity": "sha512-QWLl2P+rsCJeofkDNIT3WFmb6NrRud1SUYW8dIhXK/46XFV8Q/g7Bsvib0Askb0reRLe+WYPeeE+l5cH7SlkuQ==" + }, + "@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": 
"sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "requires": { + "defer-to-connect": "^2.0.1" + } + }, + "@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" + }, + "abbrev": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz", + "integrity": "sha1-kbR5JYinc4wl813W9jdSovh3YTU=", + "dev": true + }, + "agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==" + }, + "aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + } + }, + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "append-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/append-buffer/-/append-buffer-1.0.2.tgz", + "integrity": "sha1-2CIM9GYIFSXv6lBhTz3mUU36WPE=", + "dev": true, + "requires": { + "buffer-equal": "^1.0.0" + } + }, + "append-transform": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-2.0.0.tgz", + "integrity": "sha512-7yeyCEurROLQJFv5Xj4lEGTy0borxepjFv1g22oAdqFu//SrAlDl1O1Nxx15SH1RoliUml6p8dwJW9jvZughhg==", + "dev": true, + "requires": { + "default-require-extensions": "^3.0.0" + } + }, + "archy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", + "integrity": "sha512-Xg+9RwCg/0p32teKdGMPTPnVXKD0w3DfHnFTficozsAgsvq2XenPJq/MYpzzQ/v8zrOyJn6Ds39VA4JIDwFfqw==", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dev": true, + "requires": { + "possible-typed-array-names": "^1.0.0" + } + }, + "aws-sdk": { + "version": "2.1692.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1692.0.tgz", + "integrity": "sha512-x511uiJ/57FIsbgUe5csJ13k3uzu25uWQE+XqfBis/sB0SFoiElJWXRkgEAUh0U6n40eT3ay5Ue4oPkRMu1LYw==", + "dev": true, + "requires": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "util": "^0.12.4", + "uuid": "8.0.0", + "xml2js": "0.6.2" + } + }, + "b4a": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.7.tgz", + "integrity": "sha512-OnAYlL5b7LEkALw87fUVafQw5rVR9RjwGd4KUwNQ6DrrNmaVaUCgLipfVlzrPQ4tWOR9P0IXGNOx50jYCCdSJg==" + }, + "balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + 
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "bare-events": { + "version": "2.5.4", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.5.4.tgz", + "integrity": "sha512-+gFfDkR8pj4/TrWCGUGWmJIkBwuxPS5F+a5yWjOHQt2hHvNZd5YLzadjmDUtFmMM4y429bnKLa8bYBMHcYdnQA==", + "optional": true + }, + "bare-fs": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.1.5.tgz", + "integrity": "sha512-1zccWBMypln0jEE05LzZt+V/8y8AQsQQqxtklqaIyg5nu6OAYFhZxPXinJTSG+kU5qyNmeLgcn9AW7eHiCHVLA==", + "optional": true, + "requires": { + "bare-events": "^2.5.4", + "bare-path": "^3.0.0", + "bare-stream": "^2.6.4" + } + }, + "bare-os": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.6.1.tgz", + "integrity": "sha512-uaIjxokhFidJP+bmmvKSgiMzj2sV5GPHaZVAIktcxcpCyBFFWO+YlikVAdhmUo2vYFvFhOXIAlldqV29L8126g==", + "optional": true + }, + "bare-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bare-path/-/bare-path-3.0.0.tgz", + "integrity": "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==", + "optional": true, + "requires": { + "bare-os": "^3.0.1" + } + }, + "bare-stream": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.6.5.tgz", + "integrity": "sha512-jSmxKJNJmHySi6hC42zlZnq00rga4jjxcgNZjY9N5WlOe/iOoGRtdwGsHzQv2RlH2KOYMwGUXhf2zXd32BA9RA==", + "optional": true, + "requires": { + "streamx": "^2.21.0" + } + }, + "base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "browserslist": { + "version": "4.24.4", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", + "dev": true, + "peer": true, + "requires": { + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" + } + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "dev": true, + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "buffer-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-1.0.0.tgz", + "integrity": "sha1-WWFrSYME1Var1GaWayLu2j7KX74=", + "dev": true + }, + "cacache": { + "version": "19.0.1", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-19.0.1.tgz", + "integrity": "sha512-hdsUxulXCi5STId78vRVYEtDAjq99ICAUktLTeTYsLoTE6Z8dS0c8pWNCxwdrk9YfJeobDZc2Y186hD/5ZQgFQ==", + "requires": { + "@npmcli/fs": "^4.0.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^10.0.1", + "minipass": "^7.0.3", + "minipass-collect": "^2.0.1", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^7.0.2", + "ssri": "^12.0.0", + "tar": "^7.4.3", + "unique-filename": "^4.0.0" + }, + "dependencies": { + 
"brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" + "balanced-match": "^1.0.0" } }, - "balanced-match": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "brace-expansion": { - "version": "1.1.11", - "bundled": true, - "dev": true, + "fs-minipass": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "minipass": "^7.0.3" } }, - "chownr": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "optional": true - }, - "code-point-at": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "bundled": true, - "dev": true - }, - "console-control-strings": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "core-util-is": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "optional": true - }, - "debug": { - "version": "2.6.9", - "bundled": true, - "dev": true, - "optional": true, + "glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", "requires": { - "ms": "2.0.0" + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" } }, - "deep-extend": { - "version": "0.5.1", - "bundled": true, - "dev": true, - "optional": true - }, - "delegates": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "optional": true + 
"minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "requires": { + "brace-expansion": "^2.0.1" + } }, - "detect-libc": { - "version": "1.0.3", - "bundled": true, + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + } + } + }, + "cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==" + }, + "cacheable-request": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-12.0.1.tgz", + "integrity": "sha512-Yo9wGIQUaAfIbk+qY0X4cDQgCosecfBe3V9NSyeY4qPC2SAkbCS4Xj79VP8WOzitpJUZKc/wsRCYF5ariDIwkg==", + "requires": { + "@types/http-cache-semantics": "^4.0.4", + "get-stream": "^9.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.4", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.1", + "responselike": "^3.0.0" + }, + "dependencies": { + "mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==" + } + } + }, + "caching-transform": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", + "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", + "dev": true, + "requires": { + "hasha": "^5.0.0", + "make-dir": "^3.0.0", + "package-hash": "^4.0.0", + "write-file-atomic": "^3.0.0" + } + }, + 
"call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, + "requires": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + } + }, + "call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + } + }, + "call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dev": true, + "requires": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + } + }, + "camelcase": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", + "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk=", + "dev": true + }, + "caniuse-lite": { + "version": "1.0.30001696", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001696.tgz", + "integrity": "sha512-pDCPkvzfa39ehJtJ+OwGT/2yvT2SbjfHhiIW2LWOAcMQ7BzwxT/XuyUp4OTOd0XFWA6BKw0JalnBHgSi5DGJBQ==", + "dev": true + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "dependencies": { + "supports-color": { + "version": "7.2.0", + "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, - "optional": true - }, - "fs-minipass": { - "version": "1.2.5", - "bundled": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "requires": { + "readdirp": "^4.0.1" + } + }, + "chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==" + }, + "clean-for-publish": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clean-for-publish/-/clean-for-publish-1.0.4.tgz", + "integrity": "sha1-KZMj50qzSwXSIHBsWd+B3QTKAYo=", + "dev": true, + "requires": { + "fs-extra": "^0.26.2", + "glob": "~5.0.15", + "yargs": "~3.29.0" + }, + "dependencies": { + "fs-extra": { + "version": "0.26.7", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-0.26.7.tgz", + "integrity": "sha1-muH92UiXeY7at20JGM9C0MMYT6k=", "dev": true, - "optional": true, "requires": { - "minipass": "^2.2.1" + "graceful-fs": "^4.1.2", + "jsonfile": "^2.1.0", + "klaw": "^1.0.0", + "path-is-absolute": "^1.0.0", + "rimraf": "^2.2.8" } }, - "fs.realpath": { - "version": "1.0.0", - "bundled": true, + "jsonfile": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", + "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", "dev": true, - "optional": true + "requires": { + "graceful-fs": "^4.1.6" + } }, - "gauge": { - "version": "2.7.4", - "bundled": true, + "rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + 
"integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", "dev": true, - "optional": true, "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" + "glob": "^7.1.3" + }, + "dependencies": { + "glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } } - }, + } + } + }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true + }, + "cli": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cli/-/cli-1.0.1.tgz", + "integrity": "sha1-IoF1NPJL+klQw01TLUjsvGIbjBQ=", + "dev": true, + "requires": { + "exit": "0.1.2", + "glob": "^7.1.1" + }, + "dependencies": { "glob": { - "version": "7.1.2", - "bundled": true, + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", "dev": true, - "optional": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -2054,256 +6828,499 @@ "once": "^1.3.0", "path-is-absolute": "^1.0.0" } - }, - "has-unicode": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "optional": true - }, - "iconv-lite": { - "version": "0.4.21", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "safer-buffer": 
"^2.1.0" - } - }, - "ignore-walk": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "minimatch": "^3.0.4" - } - }, - "inflight": { - "version": "1.0.6", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.3", - "bundled": true, + } + } + }, + "cliui": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", + "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "dev": true, + "requires": { + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wrap-ansi": "^2.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", "dev": true }, - "ini": { - "version": "1.3.5", - "bundled": true, - "dev": true, - "optional": true - }, "is-fullwidth-code-point": { "version": "1.0.0", - "bundled": true, + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", "dev": true, "requires": { "number-is-nan": "^1.0.0" } }, - "isarray": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "optional": true - }, - "minimatch": { - "version": "3.0.4", - "bundled": true, - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "0.0.8", - "bundled": true, - "dev": true - }, - "minipass": { - "version": "2.2.4", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.1.1", - "yallist": "^3.0.0" - } - }, - "minizlib": { - "version": "1.1.0", - "bundled": true, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", "dev": true, - "optional": true, "requires": { - "minipass": "^2.2.1" + "code-point-at": 
"^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" } }, - "mkdirp": { - "version": "0.5.1", - "bundled": true, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", "dev": true, "requires": { - "minimist": "0.0.8" + "ansi-regex": "^2.0.0" } - }, - "ms": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "optional": true - }, - "needle": { - "version": "2.2.0", - "bundled": true, + } + } + }, + "clone": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", + "integrity": "sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18=", + "dev": true + }, + "clone-buffer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/clone-buffer/-/clone-buffer-1.0.0.tgz", + "integrity": "sha1-4+JbIHrE5wGvch4staFnksrD3Fg=", + "dev": true + }, + "clone-stats": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/clone-stats/-/clone-stats-1.0.0.tgz", + "integrity": "sha1-s3gt/4u1R04Yuba/D9/ngvh3doA=", + "dev": true + }, + "cloneable-readable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/cloneable-readable/-/cloneable-readable-1.1.3.tgz", + "integrity": "sha512-2EF8zTQOxYq70Y4XKtorQupqF0m49MBz2/yf5Bj+MHjvpG3Hy7sImifnqD6UA+TKYxeSV+u6qqQPawN5UvnpKQ==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "process-nextick-args": "^2.0.0", + "readable-stream": "^2.3.5" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "optional": true, "requires": { - "debug": "^2.1.2", - "iconv-lite": "^0.4.4", - "sax": "^1.2.4" + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": 
"~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" } }, - "node-pre-gyp": { - "version": "0.10.0", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "detect-libc": "^1.0.2", - "mkdirp": "^0.5.1", - "needle": "^2.2.0", - "nopt": "^4.0.1", - "npm-packlist": "^1.1.6", - "npmlog": "^4.0.2", - "rc": "^1.1.7", - "rimraf": "^2.6.1", - "semver": "^5.3.0", - "tar": "^4" - } + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true }, - "nopt": { - "version": "4.0.1", - "bundled": true, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "optional": true, "requires": { - "abbrev": "1", - "osenv": "^0.1.4" + "safe-buffer": "~5.1.0" } - }, - "npm-bundled": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "optional": true - }, - "npm-packlist": { - "version": "1.1.10", - "bundled": true, - "dev": true, - "optional": true, + } + } + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "dev": true + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "combyne": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/combyne/-/combyne-0.8.1.tgz", + "integrity": "sha1-WJ3kcEXVcVbcHs4YXWTDidzLR9g=", + "dev": true + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dev": true, + "requires": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==" + }, + "console-browserify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.1.0.tgz", + "integrity": "sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA=", + "dev": true, + "requires": { + "date-now": "^0.1.4" + } + }, + "convert-source-map": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + 
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + } + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "dependencies": { + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "requires": { - "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1" + "isexe": "^2.0.0" } - }, - "npmlog": { - "version": "4.1.2", - "bundled": true, + } + } + }, + "date-now": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/date-now/-/date-now-0.1.4.tgz", + "integrity": "sha1-6vQ5/U1ISK105cx9vvIAZyueNFs=", + "dev": true + }, + "debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "requires": { + "ms": "^2.1.3" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "requires": { + "mimic-response": "^3.1.0" 
+ } + }, + "default-require-extensions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-3.0.1.tgz", + "integrity": "sha512-eXTJmRbm2TIt9MgWTsOH1wEuhew6XGZcMeGKCtLedIg/NCsg1iBePXkceTdK4Fii7pzmN9tGsZhKzZ4h7O/fxw==", + "dev": true, + "requires": { + "strip-bom": "^4.0.0" + } + }, + "defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==" + }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dev": true, + "requires": { + "object-keys": "^1.0.12" + } + }, + "detect-libc": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==" + }, + "diff": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz", + "integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==", + "dev": true + }, + "dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": 
"sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "requires": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + } + }, + "duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "optional": true, "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" } }, - "number-is-nan": { - "version": "1.0.1", - "bundled": true, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "object-assign": { - "version": "4.1.1", - "bundled": true, - "dev": true, - "optional": true - }, - "once": { - "version": "1.4.0", - "bundled": true, - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "os-homedir": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "optional": true - }, - "os-tmpdir": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "optional": true - }, - "osenv": { - "version": "0.1.5", - "bundled": 
true, - "dev": true, - "optional": true, - "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.0" - } - }, - "path-is-absolute": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "optional": true - }, - "process-nextick-args": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "optional": true - }, - "rc": { - "version": "1.2.7", - "bundled": true, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "optional": true, "requires": { - "deep-extend": "^0.5.1", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "dependencies": { - "minimist": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "optional": true - } + "safe-buffer": "~5.1.0" } - }, + } + } + }, + "eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "electron-to-chromium": { + "version": "1.5.90", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.90.tgz", + "integrity": "sha512-C3PN4aydfW91Natdyd449Kw+BzhLmof6tzy5W1pFC5SpQxVXT+oyiyOG9AgYYSN9OdA/ik3YkCrpwqI8ug5Tug==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "requires": { + "once": "^1.4.0" + } + }, + 
"env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==" + }, + "err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==" + }, + "es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true + }, + "es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "requires": { + "es-errors": "^1.3.0" + } + }, + "es6-error": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", + "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", + "dev": true + }, + "escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true + }, + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": 
"sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true + }, + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", + "dev": true + }, + "exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=", + "dev": true + }, + "exponential-backoff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.1.tgz", + "integrity": "sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==" + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true + }, + "fast-fifo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", + "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==" + }, + "fdir": { + "version": "6.4.4", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", + "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "requires": {} + }, + "find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "dev": true, + "requires": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": 
"sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true + }, + "flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + }, + "dependencies": { "readable-stream": { - "version": "2.3.6", - "bundled": true, + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, - "optional": true, "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -2314,208 +7331,186 @@ "util-deprecate": "~1.0.1" } }, - "rimraf": { - "version": "2.6.2", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "glob": "^7.0.5" - } - }, "safe-buffer": { - "version": "5.1.1", - "bundled": true, + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "safer-buffer": { - "version": "2.1.2", - "bundled": true, - "dev": true, - "optional": true - }, - "sax": { - "version": "1.2.4", - "bundled": true, - "dev": true, - "optional": true - }, - "semver": { - "version": "5.5.0", - "bundled": true, - "dev": true, - "optional": true - }, - "set-blocking": { - "version": "2.0.0", - "bundled": true, 
- "dev": true, - "optional": true - }, - "signal-exit": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "optional": true - }, - "string-width": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, "string_decoder": { "version": "1.1.1", - "bundled": true, + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, - "optional": true, "requires": { "safe-buffer": "~5.1.0" } - }, - "strip-ansi": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "optional": true - }, - "tar": { - "version": "4.4.1", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "chownr": "^1.0.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.2.4", - "minizlib": "^1.1.0", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.1", - "yallist": "^3.0.2" - } - }, - "util-deprecate": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "optional": true - }, - "wide-align": { - "version": "1.1.2", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "string-width": "^1.0.2" - } - }, - "wrappy": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "yallist": { - "version": "3.0.2", - "bundled": true, - "dev": true } } }, - "fstream": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.11.tgz", - "integrity": "sha1-XB+x8RdHcRTwYyoOtLcbPLD9MXE=", + "for-each": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "dev": true, + "requires": { 
+ "is-callable": "^1.2.7" + } + }, + "foreachasync": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz", + "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY=", + "dev": true + }, + "foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "requires": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "dependencies": { + "signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==" + } + } + }, + "form-data-encoder": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-4.0.2.tgz", + "integrity": "sha512-KQVhvhK8ZkWzxKxOr56CPulAhH3dobtuQ4+hNQ+HekH/Wp5gSOafqRAeTphQUJAIk0GBvHZgJ2ZGRWd5kphMuw==" + }, + "fromentries": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fromentries/-/fromentries-1.3.2.tgz", + "integrity": "sha512-cHEpEQHUg0f8XdtZCc2ZAhrHzKzT0MrFUTcvx+hfxYu7rGMDc5SKoXFh+n4YigxsHXRzc6OrCshdR1bWH6HHyg==", + "dev": true + }, + "fs-extra": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", "requires": { "graceful-fs": "^4.1.2", - "inherits": "~2.0.0", - "mkdirp": ">=0.5 0", - "rimraf": "2" + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + } + }, + "fs-mkdirp-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-mkdirp-stream/-/fs-mkdirp-stream-1.0.0.tgz", + "integrity": "sha1-C3gV/DIBxqaeFNuYzgmMFpNSWes=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "through2": "^2.0.3" } }, + 
"fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "dev": true }, - "gauge": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", - "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=", + "gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + 
"get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + } + }, + "get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true + }, + "get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "requires": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" } }, - "get-value": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", - "dev": true - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", "requires": { - "assert-plus": "^1.0.0" + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "dependencies": { + "is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==" + } } }, "glob": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", - "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": 
"sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, "requires": { - "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", - "minimatch": "^3.0.4", + "minimatch": "2 || 3", "once": "^1.3.0", "path-is-absolute": "^1.0.0" } }, - "glob-base": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", - "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", - "dev": true, - "optional": true, - "requires": { - "glob-parent": "^2.0.0", - "is-glob": "^2.0.0" - } - }, "glob-parent": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", - "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==", "dev": true, "requires": { - "is-glob": "^2.0.0" + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" } }, "glob-stream": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/glob-stream/-/glob-stream-6.1.0.tgz", - "integrity": "sha1-cEXJlBOz65SIjYOrRtC0BMx73eQ=", + "integrity": "sha512-uMbLGAP3S2aDOHUDfdoYcdIePUCfysbAd0IAoWVZbeGU/oNQ8asHVSshLDJUPWxfzj8zsCG7/XeHPHTtow0nsw==", "dev": true, "requires": { "extend": "^3.0.0", @@ -2530,304 +7525,233 @@ "unique-stream": "^2.0.2" }, "dependencies": { - "glob-parent": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", + "glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", "dev": true, "requires": { - "is-glob": "^3.1.0", - "path-dirname": "^1.0.0" + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + 
"path-is-absolute": "^1.0.0" } }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "dev": true }, - "is-glob": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "dev": true, "requires": { - "is-extglob": "^2.1.0" + "safe-buffer": "~5.1.0" } } } }, "globals": { - "version": "9.18.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true }, - "graceful-fs": { - "version": "4.1.11", - "resolved": 
"https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", - "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" - }, - "growl": { - "version": "1.10.5", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", - "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "dev": true }, - "handlebars": { - "version": "4.0.12", - "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.12.tgz", - "integrity": "sha512-RhmTekP+FZL+XNhwS1Wf+bTTZpdLougwt5pcgA1tuz6Jcx0fpH/7z0qd71RKnZHBCxIRBHfBOnio4gViPemNzA==", - "dev": true, - "requires": { - "async": "^2.5.0", - "optimist": "^0.6.1", - "source-map": "^0.6.1", - "uglify-js": "^3.1.4" + "got": { + "version": "14.4.7", + "resolved": "https://registry.npmjs.org/got/-/got-14.4.7.tgz", + "integrity": "sha512-DI8zV1231tqiGzOiOzQWDhsBmncFW7oQDH6Zgy6pDPrqJuVZMtoSgPLLsBZQj8Jg4JFfwoOsDA8NGtLQLnIx2g==", + "requires": { + "@sindresorhus/is": "^7.0.1", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^12.0.1", + "decompress-response": "^6.0.0", + "form-data-encoder": "^4.0.2", + "http2-wrapper": "^2.2.1", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^4.0.1", + "responselike": "^3.0.0", + "type-fest": "^4.26.1" }, "dependencies": { - "async": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.1.tgz", - "integrity": "sha512-fNEiL2+AZt6AlAw/29Cr0UDe4sRAHCpEHh54WMz+Bb7QfNcFw4h3loofyJpLeQs4Yx7yuqu/2dLgM5hKOs6HlQ==", - "dev": true, - "requires": { - "lodash": "^4.17.10" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true + "type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==" } } }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" + "graceful-fs": { + "version": "4.2.9", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", + "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" }, - "har-validator": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.0.tgz", - "integrity": "sha512-+qnmNjI4OfH2ipQ9VQOw23bBd/ibtfbVdK2fYbY4acTDqKTW/YDp9McimZdDbG8iV9fZizUqQMD5xvriB146TA==", - "requires": { - "ajv": "^5.3.0", - "har-schema": "^2.0.0" - } + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true }, - "has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dev": true, "requires": { - "ansi-regex": "^2.0.0" + "es-define-property": "^1.0.0" } }, - "has-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", - "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", - 
"dev": true - }, "has-symbols": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.0.tgz", - "integrity": "sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q=", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "dev": true }, - "has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=" - }, - "has-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "dev": true, "requires": { - "get-value": "^2.0.6", - "has-values": "^1.0.0", - "isobject": "^3.0.0" - }, - "dependencies": { - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "has-symbols": "^1.0.3" } }, - "has-values": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "hasha": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-5.2.2.tgz", + "integrity": "sha512-Hrp5vIK/xr5SkeN2onO32H0MgNZ0f17HRNH39WfL0SYUNOTZ5Lz1TJ8Pajo/87dYGEFlLMm7mIc/k/s6Bvz9HQ==", "dev": true, "requires": { - "is-number": "^3.0.0", - "kind-of": "^4.0.0" - }, - "dependencies": { - "is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": 
"sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "kind-of": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", - "dev": true, - "requires": { - "is-buffer": "^1.1.5" - } - } + "is-stream": "^2.0.0", + "type-fest": "^0.8.0" } }, - "hasha": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/hasha/-/hasha-2.2.0.tgz", - "integrity": "sha1-eNfL/B5tZjA/55g3NlmEUXsvbuE=", + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, - "optional": true, "requires": { - "is-stream": "^1.0.1", - "pinkie-promise": "^2.0.0" + "function-bind": "^1.1.2" } }, "he": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", - "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", "dev": true }, - "home-or-tmp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", - "integrity": "sha1-42w/LSyufXRqhX440Y1fMqeILbg=", - "dev": true, + "html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "http-cache-semantics": { + "version": "4.1.1", + 
"resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.1" + "agent-base": "^7.1.0", + "debug": "^4.3.4" } }, - "htmlparser2": { - "version": "3.8.3", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.8.3.tgz", - "integrity": "sha1-mWwosZFRaovoZQGn15dX5ccMEGg=", - "dev": true, + "http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", "requires": { - "domelementtype": "1", - "domhandler": "2.3", - "domutils": "1.5", - "entities": "1.0", - "readable-stream": "1.1" - }, - "dependencies": { - "isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", - "dev": true - }, - "readable-stream": { - "version": "1.1.14", - "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", - "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.1", - "isarray": "0.0.1", - "string_decoder": "~0.10.x" - } - }, - "string_decoder": { - "version": "0.10.31", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", - "dev": true - } + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" } }, - "http-signature": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" + "agent-base": "^7.1.2", + "debug": "4" } }, "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "version": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "optional": true, "requires": { - "safer-buffer": ">= 2.1.2 < 3" + "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "ieee754": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.8.tgz", - "integrity": "sha1-vjPUCsEO8ZJnAfbwii2G+/0a0+Q=", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==", "dev": true }, - "ignore-walk": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-3.0.1.tgz", - "integrity": "sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ==", - "requires": { - "minimatch": "^3.0.4" - } + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==" + }, + "indent-string": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true }, "inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" } }, "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true }, "ini": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" - }, - "invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dev": true, - "requires": { - "loose-envify": "^1.0.0" - } + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true }, "invert-kv": { "version": "1.0.0", @@ -2835,6 +7759,22 @@ "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", "dev": true }, + "ip-address": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-9.0.5.tgz", + "integrity": "sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==", + "requires": { + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "dependencies": { + "sprintf-js": { + 
"version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==" + } + } + }, "is-absolute": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz", @@ -2845,23 +7785,14 @@ "is-windows": "^1.0.1" } }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, - "requires": { - "kind-of": "^3.0.2" - } - }, - "is-binary-path": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", - "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "is-arguments": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", "dev": true, - "optional": true, "requires": { - "binary-extensions": "^1.0.0" + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" } }, "is-buffer": { @@ -2870,87 +7801,42 @@ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", "dev": true }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, - "requires": { - "kind-of": "^3.0.2" - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": 
"^5.0.0" - }, - "dependencies": { - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true - } - } - }, - "is-dotfile": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", - "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", - "dev": true, - "optional": true - }, - "is-equal-shallow": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", - "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", - "dev": true, - "optional": true, - "requires": { - "is-primitive": "^2.0.0" - } - }, - "is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", "dev": true }, "is-extglob": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", - "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", "dev": true }, - "is-finite": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.0.2.tgz", - "integrity": "sha1-zGZ3aVYCvlUO8R6LSqYwU0K20Ko=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "version": "3.0.0", + 
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "is-generator-function": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz", + "integrity": "sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==", + "dev": true, "requires": { - "number-is-nan": "^1.0.0" + "call-bound": "^1.0.3", + "get-proto": "^1.0.0", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" } }, "is-glob": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", - "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", "dev": true, "requires": { - "is-extglob": "^1.0.0" + "is-extglob": "^2.1.0" } }, "is-negated-glob": { @@ -2959,47 +7845,24 @@ "integrity": "sha1-aRC8pdqMleeEtXUbl2z1oQ/uNtI=", "dev": true }, - "is-number": { + "is-plain-obj": { "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^3.0.2" - } + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true }, - "is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "is-regex": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + 
"integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", "dev": true, "requires": { - "isobject": "^3.0.1" - }, - "dependencies": { - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" } }, - "is-posix-bracket": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", - "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", - "dev": true, - "optional": true - }, - "is-primitive": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", - "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", - "dev": true, - "optional": true - }, "is-relative": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz", @@ -3010,16 +7873,25 @@ } }, "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true + }, + "is-typed-array": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", "dev": true, - "optional": true + "requires": { + "which-typed-array": "^1.1.16" + } }, "is-typedarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + "integrity": 
"sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true }, "is-unc-path": { "version": "1.0.0", @@ -3030,6 +7902,12 @@ "unc-path-regex": "^0.1.2" } }, + "is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true + }, "is-utf8": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", @@ -3051,84 +7929,146 @@ "isarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true }, "isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true + }, + "istanbul-lib-hook": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-3.0.0.tgz", + "integrity": "sha512-Pt/uge1Q9s+5VAZ+pCo16TYMWPBIl+oaNIjgLQxcX0itS6ueeaA+pEfThZpH8WxhFgCiEb8sAJY6MdUKgiIWaQ==", "dev": true, - "optional": true, "requires": { - "isarray": "1.0.0" + "append-transform": "^2.0.0" } }, - "isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "istanbul": { - "version": "0.4.5", - "resolved": 
"https://registry.npmjs.org/istanbul/-/istanbul-0.4.5.tgz", - "integrity": "sha1-ZcfXPUxNqE1POsMQuRj7C4Azczs=", - "dev": true, - "requires": { - "abbrev": "1.0.x", - "async": "1.x", - "escodegen": "1.8.x", - "esprima": "2.7.x", - "glob": "^5.0.15", - "handlebars": "^4.0.1", - "js-yaml": "3.x", - "mkdirp": "0.5.x", - "nopt": "3.x", - "once": "1.x", - "resolve": "1.1.x", - "supports-color": "^3.1.0", - "which": "^1.1.1", - "wordwrap": "^1.0.0" + "istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "requires": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + } + }, + "istanbul-lib-processinfo": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-processinfo/-/istanbul-lib-processinfo-2.0.3.tgz", + "integrity": "sha512-NkwHbo3E00oybX6NGJi6ar0B29vxyvNwoC7eJ4G4Yq28UfY758Hgn/heV8VRFhevPED4LXfFz0DQ8z/0kw9zMg==", + "dev": true, + "requires": { + "archy": "^1.0.0", + "cross-spawn": "^7.0.3", + "istanbul-lib-coverage": "^3.2.0", + "p-map": "^3.0.0", + "rimraf": "^3.0.0", + "uuid": "^8.3.2" }, "dependencies": { - "abbrev": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz", - "integrity": "sha1-kbR5JYinc4wl813W9jdSovh3YTU=", - "dev": true + "p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, + "requires": { + "aggregate-error": "^3.0.0" + } }, - "glob": { - "version": "5.0.15", - "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", - "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + 
"uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true + } + } + }, + "istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "requires": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "dependencies": { + "make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, "requires": { - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "2 || 3", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "semver": "^7.5.3" } }, "supports-color": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", - "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, "requires": { - "has-flag": "^1.0.0" + "has-flag": "^4.0.0" } } } }, + "istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": 
"https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "istanbul-reports": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "requires": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + } + }, + "jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "requires": { + "@isaacs/cliui": "^8.0.2", + "@pkgjs/parseargs": "^0.11.0" + } + }, "jmespath": { - "version": "0.15.0", - "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.15.0.tgz", - "integrity": "sha1-o/Iiqarp+Wb10nx5ZRDigJF2Qhc=", + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", + "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==", "dev": true }, "js-beautify": { @@ -3140,18 +8080,29 @@ "config-chain": "~1.1.5", "mkdirp": "~0.5.0", "nopt": "~3.0.1" + }, + "dependencies": { + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1" + } + } } }, "js-tokens": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", - "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", "dev": true }, 
"js-yaml": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", - "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, "requires": { "argparse": "^1.0.7", @@ -3167,114 +8118,164 @@ } }, "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", - "optional": true + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" }, "jsesc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", - "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true }, "jshint": { - "version": "2.9.6", - "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.9.6.tgz", - "integrity": "sha512-KO9SIAKTlJQOM4lE64GQUtGBRpTOuvbrRrSZw3AhUxMNG266nX9hK2cKA4SBhXOj0irJGyNyGSLT62HGOVDEOA==", + "version": "2.13.4", + "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.13.4.tgz", + "integrity": "sha512-HO3bosL84b2qWqI0q+kpT/OpRJwo0R4ivgmxaO848+bo10rc50SkPnrtwSFXttW0ym4np8jbJvLwk5NziB7jIw==", "dev": true, "requires": { "cli": "~1.0.0", "console-browserify": "1.1.x", "exit": "0.1.x", "htmlparser2": "3.8.x", - "lodash": "~4.17.10", + "lodash": "~4.17.21", "minimatch": "~3.0.2", - "phantom": "~4.0.1", - "phantomjs-prebuilt": "~2.1.7", - "shelljs": "0.3.x", - "strip-json-comments": "1.0.x", - 
"unicode-5.2.0": "^0.7.5" + "strip-json-comments": "1.0.x" }, "dependencies": { - "strip-json-comments": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-1.0.4.tgz", - "integrity": "sha1-HhX7ysl9Pumb8tc7TGVrCCu6+5E=", + "dom-serializer": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "dev": true, + "requires": { + "domelementtype": "^2.0.1", + "entities": "^2.0.0" + }, + "dependencies": { + "domelementtype": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", + "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==", + "dev": true + }, + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "dev": true + } + } + }, + "domelementtype": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", + "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", + "dev": true + }, + "domhandler": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.3.0.tgz", + "integrity": "sha1-LeWaCCLVAn+r/28DLCsloqir5zg=", + "dev": true, + "requires": { + "domelementtype": "1" + } + }, + "domutils": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", + "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=", + "dev": true, + "requires": { + "dom-serializer": "0", + "domelementtype": "1" + } + }, + "entities": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/entities/-/entities-1.0.0.tgz", + "integrity": "sha1-sph6o4ITR/zeZCsk/fyeT7cSvyY=", + "dev": true + }, + "htmlparser2": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.8.3.tgz", + "integrity": "sha1-mWwosZFRaovoZQGn15dX5ccMEGg=", + "dev": true, + "requires": { + "domelementtype": "1", + "domhandler": "2.3", + "domutils": "1.5", + "entities": "1.0", + "readable-stream": "1.1" + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "minimatch": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.8.tgz", + "integrity": "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", "dev": true } } }, - "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "json-schema-traverse": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", - "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=" + "json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": 
"sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" }, - "json-stable-stringify": { + "json-stable-stringify-without-jsonify": { "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz", - "integrity": "sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=", - "dev": true, - "requires": { - "jsonify": "~0.0.0" - } - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true }, "json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", - "dev": true + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==" }, "jsonfile": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", - "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", - "dev": true, + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", + "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", "requires": { "graceful-fs": "^4.1.6" } }, - "jsonify": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", - "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", - "dev": true - }, - "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" 
- } - }, - "kew": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/kew/-/kew-0.7.0.tgz", - "integrity": "sha1-edk9LTM2PW/dKXCzNdkUGtWR15s=", - "dev": true, - "optional": true - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, + "keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "requires": { - "is-buffer": "^1.1.5" + "json-buffer": "3.0.1" } }, "klaw": { @@ -3287,12 +8288,44 @@ } }, "lazystream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.0.tgz", - "integrity": "sha1-9plf4PggOS9hOWvolGJAe7dxaOQ=", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.1.tgz", + "integrity": "sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==", "dev": true, "requires": { "readable-stream": "^2.0.5" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } } }, "lcid": { @@ -3304,21 +8337,61 @@ "invert-kv": "^1.0.0" } }, - "lcov-parse": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/lcov-parse/-/lcov-parse-0.0.10.tgz", - "integrity": "sha1-GwuP+ayceIklBYK3C3ExXZ2m2aM=", - "dev": true - }, "lcov-result-merger": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/lcov-result-merger/-/lcov-result-merger-3.1.0.tgz", - "integrity": "sha512-vGXaMNGZRr4cYvW+xMVg+rg7qd5DX9SbGXl+0S3k85+gRZVK4K7UvxPWzKb/qiMwe+4bx3EOrW2o4mbdb1WnsA==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lcov-result-merger/-/lcov-result-merger-3.3.0.tgz", + "integrity": "sha512-Krg9p24jGaIT93RBMA8b5qLHDEiAXTavaTiEdMAZaJS93PsBKIcg/89cw/8rgeSfRuQX+I9x7h73SHFjCZ6cHg==", "dev": true, "requires": { "through2": "^2.0.3", "vinyl": "^2.1.0", - "vinyl-fs": "^3.0.2" + "vinyl-fs": "^3.0.2", + "yargs": "^16.2.0" + }, + "dependencies": { + "cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + }, + "y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true + }, + "yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "requires": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + } + } } }, "lead": { @@ -3330,374 +8403,457 @@ "flush-write-stream": "^1.0.2" } }, - "levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "requires": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" + "p-locate": "^4.1.0" } }, "lodash": { - "version": "4.17.11", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", - "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" - }, - "log-driver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/log-driver/-/log-driver-1.2.7.tgz", - "integrity": "sha512-U7KCmLdqsGHBLeWqYlFA0V0Sl6P08EE1ZrmA9cxjUE0WVqT9qnyVDPz1kzpFEP0jdJuFnasWIfSd7fsaNXkpbg==", + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "lodash.flattendeep": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", + "integrity": 
"sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==", "dev": true }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", "dev": true, "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" } }, - "map-cache": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", - "dev": true + "lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==" }, - "map-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" + }, + "make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", "dev": true, "requires": { - "object-visit": "^1.0.0" + "semver": "^6.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true + } } }, - "math-random": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz", - "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w=", - "dev": true, - "optional": true - }, - "micromatch": { - "version": "2.3.11", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", - "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", - "dev": true, - "optional": true, - "requires": { - "arr-diff": "^2.0.0", - "array-unique": "^0.2.1", - "braces": "^1.8.2", - "expand-brackets": "^0.1.4", - "extglob": "^0.3.1", - "filename-regex": "^2.0.0", - "is-extglob": "^1.0.0", - "is-glob": "^2.0.1", - "kind-of": "^3.0.2", - "normalize-path": "^2.0.1", - "object.omit": "^2.0.0", - "parse-glob": "^3.0.4", - "regex-cache": "^0.4.2" + "make-fetch-happen": { + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz", + "integrity": "sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ==", + "requires": { + "@npmcli/agent": "^3.0.0", + "cacache": "^19.0.1", + "http-cache-semantics": "^4.1.1", + "minipass": "^7.0.2", + "minipass-fetch": "^4.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^1.0.0", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1", + "ssri": "^12.0.0" + }, + "dependencies": { + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + } } }, - "mime-db": { - "version": "1.36.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.36.0.tgz", - "integrity": "sha512-L+xvyD9MkoYMXb1jAmzI/lWYAxAMCPvIBSWur0PZ5nOf5euahRLVqH//FKW9mWp2lkqUgYiXPgkzfMUFi4zVDw==" + "math-intrinsics": { + "version": "1.1.0", 
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true }, - "mime-types": { - "version": "2.1.20", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.20.tgz", - "integrity": "sha512-HrkrPaP9vGuWbLK1B1FfgAkbqNjIuy4eHlIYnFi7kamZyLLrGlo2mpcx0bBmNpKqBtYtAfGbodDddIgddSJC2A==", - "requires": { - "mime-db": "~1.36.0" - } + "mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==" }, "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, "requires": { "brace-expansion": "^1.1.7" } }, "minimist": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "dev": true }, "minipass": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.3.4.tgz", - "integrity": "sha512-mlouk1OHlaUE8Odt1drMtG1bAJA4ZA6B/ehysgV0LUIrDHdKgo1KorZq3pK0b/7Z7LJIQ12MNM6aC+Tn6lUZ5w==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": 
"sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "requires": { + "yallist": "^4.0.0" + } + }, + "minipass-collect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-2.0.1.tgz", + "integrity": "sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==", "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" + "minipass": "^7.0.3" }, "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + } + } + }, + "minipass-fetch": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-4.0.1.tgz", + "integrity": "sha512-j7U11C5HXigVuutxebFadoYBbd7VSdZWggSe64NVdvWNBqGAiXPL2QVCehjmw7lY1oF9gOllYbORh+hiNgfPgQ==", + "requires": { + "encoding": "^0.1.13", + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^3.0.1" + }, + "dependencies": { + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" } } }, - "minizlib": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.1.0.tgz", - "integrity": "sha512-4T6Ur/GctZ27nHfpt9THOdRZNgyJ9FZchYO1ceg5S8Q3DNLCKYy44nCZzgCJgcvx2UM8czmqak5BCxJMrq37lA==", + "minipass-flush": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": 
"sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "requires": { + "minipass": "^3.0.0" + } + }, + "minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "requires": { + "minipass": "^3.0.0" + } + }, + "minipass-sized": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", "requires": { - "minipass": "^2.2.1" + "minipass": "^3.0.0" } }, - "mixin-deep": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz", - "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==", - "dev": true, + "minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", "requires": { - "for-in": "^1.0.2", - "is-extendable": "^1.0.1" + "minipass": "^7.1.2" }, "dependencies": { - "is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dev": true, - "requires": { - "is-plain-object": "^2.0.4" - } + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" } } }, "mkdirp": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", - "integrity": 
"sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "dev": true, "requires": { - "minimist": "0.0.8" + "minimist": "^1.2.5" } }, "mocha": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz", - "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==", - "dev": true, - "requires": { - "browser-stdout": "1.3.1", - "commander": "2.15.1", - "debug": "3.1.0", - "diff": "3.5.0", - "escape-string-regexp": "1.0.5", - "glob": "7.1.2", - "growl": "1.10.5", - "he": "1.1.1", - "minimatch": "3.0.4", - "mkdirp": "0.5.1", - "supports-color": "5.4.0" - }, - "dependencies": { - "commander": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", - "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", + "version": "11.4.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.4.0.tgz", + "integrity": "sha512-O6oi5Y9G6uu8f9iqXR6iKNLWHLRex3PKbmHynfpmUnMJJGrdgXh8ZmS85Ei5KR2Gnl+/gQ9s+Ktv5CqKybNw4A==", + "dev": true, + "requires": { + "browser-stdout": "^1.3.1", + "chokidar": "^4.0.1", + "debug": "^4.3.5", + "diff": "^7.0.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^10.4.5", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "picocolors": "^1.1.1", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1", + "yargs-unparser": "^2.0.0" + }, + "dependencies": { + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", "dev": true }, - "debug": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0" + } + }, + "cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + } + }, + "find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, "requires": { - "ms": "2.0.0" + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" } }, "glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "requires": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "dependencies": { + "minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + 
"requires": { + "brace-expansion": "^2.0.1" + } + } + } + }, + "js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "requires": { + "argparse": "^2.0.1" + } + }, + "locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "requires": { + "p-locate": "^5.0.0" + } + }, + "minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "requires": { + "brace-expansion": "^2.0.1" + } + }, + "minipass": { "version": "7.1.2", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", - "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true + }, + "p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "yocto-queue": "^0.1.0" } }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "p-locate": { + "version": "5.0.0", + 
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "requires": { + "p-limit": "^3.0.2" + } + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true }, - "supports-color": { - "version": "5.4.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + }, + "y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true + }, + "yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, "requires": { - "has-flag": "^3.0.0" + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" } + }, + "yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": 
"sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true } } }, "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "nan": { - "version": "2.11.1", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.11.1.tgz", - "integrity": "sha512-iji6k87OSXa0CcrLl9z+ZiYSuR2o+c0bGuNmXdrhTQTakxytAFsC56SArGYoiHlJlFoHSnvmhpceZJaXkVuOtA==" + "version": "2.23.1", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.23.1.tgz", + "integrity": "sha512-r7bBUGKzlqk8oPBDYxt6Z0aEdF1G1rwlMcLk8LCOMbOzf0mG+JUfUzG4fIMWwHWP0iyaLWEQZJmtB7nOHEm/qw==" }, - "nanomatch": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "dev": true, - "optional": true, - "requires": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "fragment-cache": "^0.2.1", - "is-windows": "^1.0.2", - "kind-of": "^6.0.2", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true, - "optional": true - }, - "array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true, - "optional": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true, - "optional": true - } - } + "negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==" }, - "needle": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/needle/-/needle-2.2.4.tgz", - "integrity": "sha512-HyoqEb4wr/rsoaIDfTH2aVL9nWtQqba2/HvMv+++m8u0dz808MaagKILxtfeSN7QU7nvbQ79zk3vYOJp9zsNEA==", + "node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", "requires": { - "debug": "^2.1.2", - "iconv-lite": "^0.4.4", - "sax": "^1.2.4" - }, - "dependencies": { - "sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" - } + "whatwg-url": "^5.0.0" } }, "node-gyp": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-3.8.0.tgz", - "integrity": "sha512-3g8lYefrRRzvGeSowdJKAKyks8oUpLEd/DyPV4eMhVlhJ0aNaZqIrNUIPuEWWTAoPqyFkfGrM67MC69baqn6vA==", - "requires": { - "fstream": "^1.0.0", - "glob": "^7.0.3", - "graceful-fs": "^4.1.2", - "mkdirp": "^0.5.0", - "nopt": "2 || 3", - "npmlog": "0 || 1 || 2 || 3 || 4", - "osenv": "0", - "request": "^2.87.0", - "rimraf": "2", - "semver": "~5.3.0", - "tar": "^2.0.0", - "which": "1" - } - }, - "node-pre-gyp": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.11.0.tgz", - "integrity": "sha512-TwWAOZb0j7e9eGaf9esRx3ZcLaE5tQ2lvYy1pb5IAaG1a2e2Kv5Lms1Y4hpj+ciXJRofIxxlt5haeQ/2ANeE0Q==", 
+ "version": "11.2.0", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-11.2.0.tgz", + "integrity": "sha512-T0S1zqskVUSxcsSTkAsLc7xCycrRYmtDHadDinzocrThjyQCn5kMlEBSj6H4qDbgsIOSLmmlRIeb0lZXj+UArA==", + "requires": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^14.0.3", + "nopt": "^8.0.0", + "proc-log": "^5.0.0", + "semver": "^7.3.5", + "tar": "^7.4.3", + "tinyglobby": "^0.2.12", + "which": "^5.0.0" + } + }, + "node-preload": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/node-preload/-/node-preload-0.2.1.tgz", + "integrity": "sha512-RM5oyBy45cLEoHqCeh+MNuFAxO0vTFBLskvQbOKnEE7YTTSN4tbN8QWDIPQ6L+WvKsB/qLEGpYe2ZZ9d4W9OIQ==", + "dev": true, "requires": { - "detect-libc": "^1.0.2", - "mkdirp": "^0.5.1", - "needle": "^2.2.1", - "nopt": "^4.0.1", - "npm-packlist": "^1.1.6", - "npmlog": "^4.0.2", - "rc": "^1.2.7", - "rimraf": "^2.6.1", - "semver": "^5.3.0", - "tar": "^4" - }, - "dependencies": { - "nopt": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz", - "integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=", - "requires": { - "abbrev": "1", - "osenv": "^0.1.4" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "tar": { - "version": "4.4.6", - "resolved": "https://registry.npmjs.org/tar/-/tar-4.4.6.tgz", - "integrity": "sha512-tMkTnh9EdzxyfW+6GK6fCahagXsnYk6kE6S9Gr9pjVdys769+laCTbodXDhPAjzVtEBazRgP0gYqOjnk9dQzLg==", - "requires": { - "chownr": "^1.0.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.3.3", - "minizlib": "^1.1.0", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.2", - "yallist": "^3.0.2" - } - } + "process-on-spawn": "^1.0.0" } }, - "nodegit-promise": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/nodegit-promise/-/nodegit-promise-4.0.0.tgz", - "integrity": "sha1-VyKxhPLfcycWEGSnkdLoQskWezQ=", - "requires": { - "asap": "~2.0.3" - } + "node-releases": { + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true }, "nopt": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", - "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-8.1.0.tgz", + "integrity": "sha512-ieGu42u/Qsa4TFktmaKEwM6MQH0pOWnaB3htzh0JRtx84+Mebc0cbZYN5bC+6WTZ4+77xrL9Pn5m7CV6VIkV7A==", "requires": { - "abbrev": "1" + "abbrev": "^3.0.0" + }, + "dependencies": { + "abbrev": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-3.0.1.tgz", + "integrity": "sha512-AO2ac6pjRB3SJmGJo+v5/aK6Omggp6fsLrs6wN9bd35ulu4cCwaAU9+7ZhXjeqHVkaHThLuzH0nZr0YpCDhygg==" + } } }, "normalize-path": { @@ -3709,147 +8865,165 @@ "remove-trailing-separator": "^1.0.1" } }, + "normalize-url": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.1.tgz", + "integrity": "sha512-IO9QvjUMWxPQQhs60oOu10CRkWCiZzSUkzbXGGV9pviYl1fXYcvkzQ5jV9z8Y6un8ARoVRl4EtC6v6jNqbaJ/w==" + }, "now-and-later": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/now-and-later/-/now-and-later-2.0.0.tgz", - "integrity": "sha1-vGHLtFbXnLMiB85HygUTb/Ln1u4=", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/now-and-later/-/now-and-later-2.0.1.tgz", + "integrity": "sha512-KGvQ0cB70AQfg107Xvs/Fbu+dGmZoTRJp2TaPwcwQm3/7PteUyN2BCgk8KBMPGBUXZdVwyWS8fDCGFygBm19UQ==", "dev": true, "requires": { "once": "^1.3.2" } }, - "npm-bundled": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/npm-bundled/-/npm-bundled-1.0.5.tgz", - "integrity": 
"sha512-m/e6jgWu8/v5niCUKQi9qQl8QdeEduFA96xHDDzFGqly0OOjI7c+60KM/2sppfnUU9JJagf+zs+yGhqSOFj71g==" - }, - "npm-packlist": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/npm-packlist/-/npm-packlist-1.1.11.tgz", - "integrity": "sha512-CxKlZ24urLkJk+9kCm48RTQ7L4hsmgSVzEk0TLGPzzyuFxD7VNgy5Sl24tOLMzQv773a/NeJ1ce1DKeacqffEA==", - "requires": { - "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1" - } - }, - "npmlog": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", - "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", - "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "nth-check": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.1.tgz", - "integrity": "sha1-mSms32KPwsQQmN6rgqxYDPFJquQ=", - "dev": true, - "requires": { - "boolbase": "~1.0.0" - } - }, "number-is-nan": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true }, - "object-copy": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", - "dev": true, - "requires": { - "copy-descriptor": "^0.1.0", - "define-property": "^0.2.5", - "kind-of": "^3.0.3" + "nyc": { + "version": "17.1.0", + 
"resolved": "https://registry.npmjs.org/nyc/-/nyc-17.1.0.tgz", + "integrity": "sha512-U42vQ4czpKa0QdI1hu950XuNhYqgoM+ZF1HT+VuUHL9hPfDPVvNQyltmMqdE9bUHMVa+8yNbc3QKTj8zQhlVxQ==", + "dev": true, + "requires": { + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "caching-transform": "^4.0.0", + "convert-source-map": "^1.7.0", + "decamelize": "^1.2.0", + "find-cache-dir": "^3.2.0", + "find-up": "^4.1.0", + "foreground-child": "^3.3.0", + "get-package-type": "^0.1.0", + "glob": "^7.1.6", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-hook": "^3.0.0", + "istanbul-lib-instrument": "^6.0.2", + "istanbul-lib-processinfo": "^2.0.2", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.0.2", + "make-dir": "^3.0.0", + "node-preload": "^0.2.1", + "p-map": "^3.0.0", + "process-on-spawn": "^1.0.0", + "resolve-from": "^5.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "spawn-wrap": "^2.0.0", + "test-exclude": "^6.0.0", + "yargs": "^15.0.2" }, "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "cliui": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", + "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^6.2.0" + } + }, + "glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "p-map": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", "dev": true, "requires": { - "is-descriptor": "^0.1.0" + "aggregate-error": "^3.0.0" + } + }, + "wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + }, + "y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==", + "dev": true + }, + "yargs": { + "version": "15.4.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", + "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", + "dev": true, + "requires": { + "cliui": "^6.0.0", + "decamelize": "^1.2.0", + "find-up": "^4.1.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^4.2.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^18.1.2" + } + }, + "yargs-parser": { + "version": "18.1.3", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", + "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", + "dev": true, + 
"requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" } } } }, "object-keys": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", - "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", "dev": true }, - "object-visit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", - "dev": true, - "requires": { - "isobject": "^3.0.0" - }, - "dependencies": { - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } - } - }, "object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", - "dev": true, - "requires": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" - } - }, - "object.omit": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", - "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", - "dev": true, - "optional": true, - "requires": { - "for-own": "^0.1.4", - "is-extendable": "^0.1.1" - } - }, - "object.pick": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": 
"sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "dev": true, "requires": { - "isobject": "^3.0.1" - }, - "dependencies": { - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" } }, "once": { @@ -3860,38 +9034,6 @@ "wrappy": "1" } }, - "optimist": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", - "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=", - "dev": true, - "requires": { - "minimist": "~0.0.1", - "wordwrap": "~0.0.2" - }, - "dependencies": { - "wordwrap": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", - "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=", - "dev": true - } - } - }, - "optionator": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", - "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", - "dev": true, - "requires": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.4", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "wordwrap": "~1.0.0" - } - }, "ordered-read-streams": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ordered-read-streams/-/ordered-read-streams-1.0.1.tgz", @@ -3899,13 +9041,40 @@ "dev": true, "requires": { "readable-stream": "^2.0.1" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": 
"~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } } }, - "os-homedir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", - "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=" - }, "os-locale": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", @@ -3915,58 +9084,56 @@ "lcid": "^1.0.0" } }, - "os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=" - }, - "osenv": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz", - "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==", - "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.0" - } + "p-cancelable": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-4.0.1.tgz", + "integrity": "sha512-wBowNApzd45EIKdO1LaU+LrMBwAcjfPaYtVzV3lmfM3gf8Z4CHZsiIqlM8TZZ8okYvh5A1cP6gTfCRQtwUpaUg==" }, - "output-file-sync": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/output-file-sync/-/output-file-sync-1.1.2.tgz", - "integrity": "sha1-0KM+7+YaIF+suQCS6CZZjVJFznY=", + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "requires": { - "graceful-fs": "^4.1.4", - "mkdirp": "^0.5.1", - "object-assign": "^4.1.0" + "p-try": "^2.0.0" } }, - "parse-glob": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", - "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, - "optional": true, "requires": { - "glob-base": "^0.3.0", - "is-dotfile": "^1.0.0", - "is-extglob": "^1.0.0", - "is-glob": "^2.0.0" + "p-limit": "^2.2.0" } }, - "parse5": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-3.0.3.tgz", - "integrity": "sha512-rgO9Zg5LLLkfJF9E6CCmXlSE4UVceloys8JrFqCcHloC3usd/kJCyPDwH2SOlzix2j3xaP9sUX3e8+kvkuleAA==", + "p-map": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.3.tgz", + "integrity": "sha512-VkndIv2fIB99swvQoA65bm+fsmt6UNdGeIB0oxBs+WhAhdh08QA04JXpI7rbB9r08/nkbysKoya9rtDERYOYMA==" + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "package-hash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-4.0.0.tgz", + "integrity": "sha512-whdkPIooSu/bASggZ96BWVvZTRMOFxnyUG5PnTSGKoJE2gd5mbVNmR2Nj20QFzxYYgAXpoqC+AiXzl+UMRh7zQ==", "dev": true, "requires": { - "@types/node": "*" + "graceful-fs": "^4.1.15", + "hasha": "^5.0.0", + "lodash.flattendeep": "^4.4.0", + "release-zalgo": "^1.0.0" } }, - "pascalcase": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - 
"integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", - "dev": true + "package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==" }, "path-dirname": { "version": "1.0.2", @@ -3974,128 +9141,93 @@ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=", "dev": true }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, "path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", - "dev": true, - "optional": true - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true }, - "phantom": { - "version": "4.0.12", - "resolved": "https://registry.npmjs.org/phantom/-/phantom-4.0.12.tgz", - "integrity": "sha512-Tz82XhtPmwCk1FFPmecy7yRGZG2btpzY2KI9fcoPT7zT9det0CcMyfBFPp1S8DqzsnQnm8ZYEfdy528mwVtksA==", - "dev": true, - "optional": true, - "requires": { - "phantomjs-prebuilt": "^2.1.16", - "split": "^1.0.1", - "winston": "^2.4.0" - } + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" }, - "phantomjs-prebuilt": { - "version": "2.1.16", - "resolved": 
"https://registry.npmjs.org/phantomjs-prebuilt/-/phantomjs-prebuilt-2.1.16.tgz", - "integrity": "sha1-79ISpKOWbTZHaE6ouniFSb4q7+8=", - "dev": true, - "optional": true, + "path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", "requires": { - "es6-promise": "^4.0.3", - "extract-zip": "^1.6.5", - "fs-extra": "^1.0.0", - "hasha": "^2.2.0", - "kew": "^0.7.0", - "progress": "^1.1.8", - "request": "^2.81.0", - "request-progress": "^2.0.1", - "which": "^1.2.10" + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" }, "dependencies": { - "fs-extra": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-1.0.0.tgz", - "integrity": "sha1-zTzl9+fLYUWIP8rjGR6Yd/hYeVA=", - "dev": true, - "optional": true, - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^2.1.0", - "klaw": "^1.0.0" - } + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" } } }, - "pinkie": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", - "dev": true, - "optional": true + "picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true }, - "pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + 
"integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "peer": true + }, + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, - "optional": true, "requires": { - "pinkie": "^2.0.0" + "find-up": "^4.0.0" } }, - "posix-character-classes": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", - "dev": true, - "optional": true - }, - "prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", "dev": true }, - "preserve": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", - "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", - "dev": true, - "optional": true - }, - "private": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", - "dev": true + "proc-log": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz", + "integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==" }, "process-nextick-args": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", - "integrity": 
"sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==" + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true }, - "progress": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz", - "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=", + "process-on-spawn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.1.0.tgz", + "integrity": "sha512-JOnOPQ/8TZgjs1JIH/m9ni7FfimjNa/PRx7y/Wb5qdItsnhO0jE4AT7fC0HjC28DUQWDr50dwSYZLdRMlqDq3Q==", "dev": true, - "optional": true + "requires": { + "fromentries": "^1.2.0" + } }, - "promisify-node": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/promisify-node/-/promisify-node-0.3.0.tgz", - "integrity": "sha1-tLVaz5D6p9K4uQyjlomQhsAwYM8=", + "promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", "requires": { - "nodegit-promise": "~4.0.0" + "err-code": "^2.0.2", + "retry": "^0.12.0" } }, "proto-list": { @@ -4104,16 +9236,10 @@ "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", "dev": true }, - "psl": { - "version": "1.1.29", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.1.29.tgz", - "integrity": "sha512-AeUmQ0oLN02flVHXWh9sSJF7mcdFq0ppid/JkErufc3hGIV/AMa8Fo9VgDo/cT2jFdOWoFvHp90qqBH54W+gjQ==" - }, "pump": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", - "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", - "dev": true, + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + 
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", "requires": { "end-of-stream": "^1.1.0", "once": "^1.3.1" @@ -4128,476 +9254,53 @@ "duplexify": "^3.6.0", "inherits": "^2.0.3", "pump": "^2.0.0" - } - }, - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" - }, - "querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "dev": true - }, - "ramda": { - "version": "0.25.0", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.25.0.tgz", - "integrity": "sha512-GXpfrYVPwx3K7RQ6aYT8KPS8XViSXUVJT1ONhoKPE9VAleW42YE+U+8VEyGWt41EnEQW7gwecYJriTI0pKoecQ==" - }, - "randomatic": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.0.tgz", - "integrity": "sha512-KnGPVE0lo2WoXxIZ7cPR8YBpiol4gsSuOwDSg410oHh80ZMp5EiypNqL2K4Z77vJn6lB5rap7IkAmcUlalcnBQ==", - "dev": true, - "optional": true, - "requires": { - "is-number": "^4.0.0", - "kind-of": "^6.0.0", - "math-random": "^1.0.1" - }, - "dependencies": { - "is-number": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", - "dev": true, - "optional": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true, - "optional": true - } - } - }, - "rc": { - 
"version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "dependencies": { - "minimist": { - "version": "1.2.0", - "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" - } - } - }, - "readable-stream": { - "version": "2.3.6", - "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "readdirp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", - "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", - "dev": true, - "optional": true, - "requires": { - "graceful-fs": "^4.1.11", - "micromatch": "^3.1.10", - "readable-stream": "^2.0.2" }, "dependencies": { - "arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "dev": true, - "optional": true - }, - "array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "dev": true - }, - "braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dev": true, 
- "optional": true, - "requires": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - }, - "dependencies": { - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "optional": true, - "requires": { - "is-extendable": "^0.1.0" - } - } - } - }, - "expand-brackets": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dev": true, - "optional": true, - "requires": { - "debug": "^2.3.3", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "posix-character-classes": "^0.1.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, - "optional": true, - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "optional": true, - "requires": { - "is-extendable": "^0.1.0" - } - }, - "is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": 
"https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, - "optional": true, - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, - "optional": true, - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dev": true, - "optional": true, - "requires": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - } - }, - "kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "dev": true, - "optional": true - } - } - }, - "extglob": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dev": true, - "optional": true, - "requires": { - "array-unique": "^0.3.2", - "define-property": "^1.0.0", - "expand-brackets": "^2.1.4", - "extend-shallow": "^2.0.1", - "fragment-cache": "^0.2.1", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, - "optional": true, - "requires": { - "is-descriptor": "^1.0.0" - } - }, - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "optional": true, - "requires": { - "is-extendable": "^0.1.0" - } - } - } - }, - "fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dev": true, - "optional": true, - "requires": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - }, - "dependencies": { - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "optional": true, - "requires": { - "is-extendable": "^0.1.0" - } - } - } - }, - "is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": 
"sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dev": true, - "optional": true, - "requires": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - } - }, - "is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^3.0.2" - }, - "dependencies": { - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, - "optional": true, - "requires": { - "is-buffer": "^1.1.5" - } - } - } - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true, - "optional": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true - }, - "micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", "dev": true, - "optional": true, "requires": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", 
- "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" + "end-of-stream": "^1.1.0", + "once": "^1.3.1" } } } }, - "regenerate": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", - "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==", - "dev": true - }, - "regenerator-runtime": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", - "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==", + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", "dev": true }, - "regenerator-transform": { - "version": "0.10.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", - "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", - "dev": true, - "requires": { - "babel-runtime": "^6.18.0", - "babel-types": "^6.19.0", - "private": "^0.1.6" - } - }, - "regex-cache": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", - "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", - "dev": true, - "optional": true, - "requires": { - "is-equal-shallow": "^0.1.3" - } - }, - "regex-not": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "dev": true, - "requires": { - "extend-shallow": "^3.0.2", - "safe-regex": "^1.1.0" - } + "quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": 
"sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==" }, - "regexpu-core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", - "integrity": "sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA=", + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", "dev": true, "requires": { - "regenerate": "^1.2.1", - "regjsgen": "^0.2.0", - "regjsparser": "^0.1.4" + "safe-buffer": "^5.1.0" } }, - "regjsgen": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", - "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=", + "readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", "dev": true }, - "regjsparser": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", - "integrity": "sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw=", + "release-zalgo": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", + "integrity": "sha512-gUAyHVHPPC5wdqX/LG4LWtRYtgjxyX78oanFNTMMyFEfOqdC54s3eE82imuWKbOeqYht2CrNf64Qb8vgmmtZGA==", "dev": true, "requires": { - "jsesc": "~0.5.0" - }, - "dependencies": { - "jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "dev": true - } + "es6-error": "^4.0.1" } }, "remove-bom-buffer": { @@ -4627,99 +9330,33 @@ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", "dev": true }, - "repeat-element": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", - "integrity": 
"sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", + "replace-ext": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.1.tgz", + "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==", "dev": true }, - "repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", "dev": true }, - "repeating": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", - "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", - "dev": true, - "requires": { - "is-finite": "^1.0.0" - } - }, - "replace-ext": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.0.tgz", - "integrity": "sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs=", - "dev": true - }, - "request": { - "version": "2.88.0", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz", - "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.0", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.4.3", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - 
"dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "request-progress": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-2.0.1.tgz", - "integrity": "sha1-XTa7V5YcZzqlt4jbyBQf3yO0Tgg=", - "dev": true, - "optional": true, - "requires": { - "throttleit": "^1.0.0" - } - }, - "request-promise-core": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/request-promise-core/-/request-promise-core-1.1.1.tgz", - "integrity": "sha1-Pu4AssWqgyOc+wTFcA2jb4HNCLY=", - "requires": { - "lodash": "^4.13.1" - } + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true }, - "request-promise-native": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/request-promise-native/-/request-promise-native-1.0.5.tgz", - "integrity": "sha1-UoF3D2jgyXGeUWP9P6tIIhX0/aU=", - "requires": { - "request-promise-core": "1.1.1", - "stealthy-require": "^1.1.0", - "tough-cookie": ">=2.3.3" - } + "resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" }, - "resolve": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", - "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": 
"sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true }, "resolve-options": { @@ -4731,447 +9368,426 @@ "value-or-function": "^3.0.0" } }, - "resolve-url": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", - "dev": true + "responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "requires": { + "lowercase-keys": "^3.0.0" + } }, - "ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "dev": true + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==" }, "rimraf": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", - "integrity": "sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, "requires": { - "glob": "^7.0.5" + "glob": "^7.1.3" + }, + "dependencies": { + "glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + 
"path-is-absolute": "^1.0.0" + } + } } }, "safe-buffer": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", - "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true }, - "safe-regex": { + "safe-regex-test": { "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", "dev": true, "requires": { - "ret": "~0.1.10" + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" } }, "safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "optional": true }, "sax": { "version": "1.2.1", - "resolved": "http://registry.npmjs.org/sax/-/sax-1.2.1.tgz", - "integrity": "sha1-e45lYZCyKOgaZq6nSEgNgozS03o=", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", + "integrity": "sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==", "dev": true }, "semver": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.3.0.tgz", - "integrity": "sha1-myzl094C0XxgEq0yaqa00M9U+U8=" + "version": "7.7.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.0.tgz", + "integrity": 
"sha512-DrfFnPzblFmNrIZzg5RzHegbiRWg7KMR7btwi2yjHwx06zsUbO5g613sVwEV7FTwmzJu+Io0lJe2GJ3LxqpvBQ==" + }, + "serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" + } }, "set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true }, - "set-value": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz", - "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==", + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", "dev": true, "requires": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.3", - "split-string": "^3.0.1" - }, - "dependencies": { - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "requires": { - "is-extendable": "^0.1.0" - } - } + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" } }, - "shelljs": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.3.0.tgz", - "integrity": "sha1-NZbmMHp4FUT1kfN9phg2DzHbV7E=", - "dev": true + "shebang-command": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "requires": { + "shebang-regex": "^3.0.0" + } }, - "signal-exit": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=" + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" }, - "slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, - "snapdragon": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "dev": true, + "smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==" + }, + "socks": { + "version": "2.8.4", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.4.tgz", + "integrity": "sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==", "requires": { - "base": "^0.11.1", - "debug": "^2.2.0", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "map-cache": "^0.2.2", - "source-map": "^0.5.6", - "source-map-resolve": "^0.5.0", - "use": "^3.1.0" - }, - 
"dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, - "requires": { - "is-descriptor": "^0.1.0" - } - }, - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "requires": { - "is-extendable": "^0.1.0" - } - } + "ip-address": "^9.0.5", + "smart-buffer": "^4.2.0" } }, - "snapdragon-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "requires": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + } + }, + "spawn-wrap": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", + "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", "dev": true, - "optional": true, "requires": { - "define-property": "^1.0.0", - "isobject": "^3.0.0", - "snapdragon-util": "^3.0.1" + "foreground-child": "^2.0.0", + "is-windows": "^1.0.2", + "make-dir": "^3.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "which": "^2.0.1" }, "dependencies": { - "define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dev": true, - "optional": true, - "requires": { - "is-descriptor": "^1.0.0" - } - }, - 
"is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^6.0.0" - } - }, - "is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "foreground-child": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-2.0.0.tgz", + "integrity": "sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA==", "dev": true, - "optional": true, "requires": { - "kind-of": "^6.0.0" + "cross-spawn": "^7.0.0", + "signal-exit": "^3.0.2" } }, - "is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, - "optional": true, "requires": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" + "isexe": "^2.0.0" } - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true, - "optional": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": 
"sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true } } }, - "snapdragon-util": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "dev": true, - "optional": true, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "ssri": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-12.0.0.tgz", + "integrity": "sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==", "requires": { - "kind-of": "^3.2.0" + "minipass": "^7.0.3" + }, + "dependencies": { + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + } } }, - "source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", "dev": true }, - "source-map-resolve": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz", - "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==", - "dev": true, + "streamx": { + "version": "2.22.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.22.0.tgz", + "integrity": 
"sha512-sLh1evHOzBy/iWRiR6d1zRcLao4gGZr3C1kzNz4fopCOKJb6xD9ub8Mpi9Mr1R6id5o43S+d93fI48UC5uM9aw==", "requires": { - "atob": "^2.1.1", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" + "bare-events": "^2.2.0", + "fast-fifo": "^1.3.2", + "text-decoder": "^1.1.0" } }, - "source-map-support": { - "version": "0.4.18", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", - "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", - "dev": true, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { - "source-map": "^0.5.6" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" } }, - "source-map-url": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", - "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", - "dev": true - }, - "split": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz", - "integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==", - "dev": true, - "optional": true, + "string-width-cjs": { + "version": "npm:string-width@4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { - "through": "2" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" } }, - "split-string": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": 
"sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "dev": true, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "extend-shallow": "^3.0.0" + "ansi-regex": "^5.0.1" } }, - "sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", - "dev": true - }, - "sshpk": { - "version": "1.14.2", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.14.2.tgz", - "integrity": "sha1-xvxhZIo9nE52T9P8306hBeSSupg=", + "strip-ansi-cjs": { + "version": "npm:strip-ansi@6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" + "ansi-regex": "^5.0.1" } }, - "stack-trace": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=", - "dev": true, - "optional": true + "strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true }, - "static-extend": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "strip-json-comments": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-1.0.4.tgz", + "integrity": "sha1-HhX7ysl9Pumb8tc7TGVrCCu6+5E=", + "dev": true + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, "requires": { - "define-property": "^0.2.5", - "object-copy": "^0.1.0" + "has-flag": "^4.0.0" + } + }, + "tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "requires": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" }, "dependencies": { - "define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dev": true, - "requires": { - "is-descriptor": "^0.1.0" - } + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + }, + "mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==" + }, + "yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==" } } }, - "stealthy-require": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/stealthy-require/-/stealthy-require-1.1.1.tgz", - "integrity": 
"sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks=" - }, - "stream-shift": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.0.tgz", - "integrity": "sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI=", - "dev": true - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "tar-fs": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.1.tgz", + "integrity": "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg==", "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" + "bare-fs": "^4.0.1", + "bare-path": "^3.0.0", + "pump": "^3.0.0", + "tar-stream": "^3.1.5" } }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "tar-stream": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", "requires": { - "safe-buffer": "~5.1.0" + "b4a": "^1.6.4", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" } }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, "requires": { - "ansi-regex": "^2.0.0" + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "dependencies": { + "glob": 
{ + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + } } }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - }, - "tar": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.1.tgz", - "integrity": "sha1-jk0qJWwOIYXGsYrWlK7JaLg8sdE=", + "text-decoder": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.3.tgz", + "integrity": "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA==", "requires": { - "block-stream": "*", - "fstream": "^1.0.2", - "inherits": "2" + "b4a": "^1.6.4" } }, - "tar-fs": { - "version": "1.16.3", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-1.16.3.tgz", - "integrity": "sha512-NvCeXpYx7OsmOh8zIOP/ebG55zZmxLE0etfWRbWok+q2Qo8x/vOR/IJT1taADXPe+jsiu9axDb3X4B+iIgNlKw==", + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, "requires": { - "chownr": "^1.0.1", - "mkdirp": "^0.5.1", - "pump": "^1.0.0", - "tar-stream": "^1.1.2" + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" }, "dependencies": { - "pump": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/pump/-/pump-1.0.3.tgz", - "integrity": "sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw==", + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" + "safe-buffer": "~5.1.0" } } } }, - "tar-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", - "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", - "requires": { - "bl": "^1.0.0", - "buffer-alloc": "^1.2.0", - "end-of-stream": "^1.0.0", - "fs-constants": "^1.0.0", - "readable-stream": "^2.3.0", - "to-buffer": "^1.1.1", - "xtend": "^4.0.0" - } - }, - "throttleit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", - "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", - "dev": true, - "optional": true - }, - "through": { - "version": "2.3.8", - "resolved": 
"http://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true, - "optional": true - }, - "through2": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.3.tgz", - "integrity": "sha1-AARWmzfHx0ujnEPzzteNGtlBQL4=", - "dev": true, - "requires": { - "readable-stream": "^2.1.5", - "xtend": "~4.0.1" - } - }, "through2-filter": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/through2-filter/-/through2-filter-2.0.0.tgz", - "integrity": "sha1-YLxVoNrLdghdsfna6Zq0P4PWIuw=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/through2-filter/-/through2-filter-3.0.0.tgz", + "integrity": "sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA==", "dev": true, "requires": { "through2": "~2.0.0", "xtend": "~4.0.0" } }, + "tinyglobby": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", + "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "requires": { + "fdir": "^6.4.4", + "picomatch": "^4.0.2" + } + }, "to-absolute-glob": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/to-absolute-glob/-/to-absolute-glob-2.0.2.tgz", @@ -5182,61 +9798,6 @@ "is-negated-glob": "^1.0.0" } }, - "to-buffer": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", - "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" - }, - "to-fast-properties": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", - "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=", - "dev": true - }, - "to-object-path": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", 
- "dev": true, - "requires": { - "kind-of": "^3.0.2" - } - }, - "to-regex": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "dev": true, - "requires": { - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "regex-not": "^1.0.2", - "safe-regex": "^1.1.0" - } - }, - "to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", - "dev": true, - "optional": true, - "requires": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - }, - "dependencies": { - "is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dev": true, - "optional": true, - "requires": { - "kind-of": "^3.0.2" - } - } - } - }, "to-through": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/to-through/-/to-through-2.0.0.tgz", @@ -5246,76 +9807,24 @@ "through2": "^2.0.3" } }, - "tough-cookie": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", - "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", - "requires": { - "psl": "^1.1.24", - "punycode": "^1.4.1" - } + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=" }, - "trim-right": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", - "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=", + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": 
"sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", "dev": true }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", - "optional": true - }, - "type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", - "dev": true, - "requires": { - "prelude-ls": "~1.1.2" - } - }, - "typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", - "dev": true, - "optional": true - }, - "uglify-js": { - "version": "3.4.9", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.9.tgz", - "integrity": "sha512-8CJsbKOtEbnJsTyv6LE6m6ZKniqMiFWmm9sRbopbkGs3gMPPfd3Fh8iIA4Ykv5MgaTbqHr4BaoGLJLZNhsrW1Q==", + "typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", "dev": true, - "optional": true, "requires": { - "commander": "~2.17.1", - "source-map": "~0.6.1" - }, - "dependencies": { - "commander": { - "version": "2.17.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz", - "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==", - "dev": true, - "optional": true - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "optional": true - } + "is-typedarray": "^1.0.0" } }, "unc-path-regex": { @@ -5324,55 +9833,30 @@ "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=", "dev": true }, - "unicode-5.2.0": { - "version": "0.7.5", - "resolved": "https://registry.npmjs.org/unicode-5.2.0/-/unicode-5.2.0-0.7.5.tgz", - "integrity": "sha512-KVGLW1Bri30x00yv4HNM8kBxoqFXr0Sbo55735nvrlsx4PYBZol3UtoWgO492fSwmsetzPEZzy73rbU8OGXJcA==", - "dev": true + "unique-filename": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-4.0.0.tgz", + "integrity": "sha512-XSnEewXmQ+veP7xX2dS5Q4yZAvO40cBN2MWkJ7D/6sW4Dg6wYBNwM1Vrnz1FhH5AdeLIlUXRI9e28z1YZi71NQ==", + "requires": { + "unique-slug": "^5.0.0" + } }, - "union-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz", - "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=", - "dev": true, + "unique-slug": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-5.0.0.tgz", + "integrity": "sha512-9OdaqO5kwqR+1kVgHAhsp5vPNU0hnxRa26rBFNfNgM7M6pNtgzeBn3s/xbyCQL3dcjzOatcef6UUHpB/6MaETg==", "requires": { - "arr-union": "^3.1.0", - "get-value": "^2.0.6", - "is-extendable": "^0.1.1", - "set-value": "^0.4.3" - }, - "dependencies": { - "extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dev": true, - "requires": { - "is-extendable": "^0.1.0" - } - }, - "set-value": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz", - "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=", - "dev": true, - "requires": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.1", - "to-object-path": "^0.3.0" - } - } + "imurmurhash": "^0.1.4" } 
}, "unique-stream": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/unique-stream/-/unique-stream-2.2.1.tgz", - "integrity": "sha1-WqADz76Uxf+GbE59ZouxxNuts2k=", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/unique-stream/-/unique-stream-2.3.1.tgz", + "integrity": "sha512-2nY4TnBE70yoxHkDli7DMazpWiP7xMdCYqU2nBRO0UB+ZpEkGsSija7MvmvnZFUeC+mrgiUfcHSr3LmRFIg4+A==", "dev": true, "requires": { - "json-stable-stringify": "^1.0.0", - "through2-filter": "^2.0.0" + "json-stable-stringify-without-jsonify": "^1.0.1", + "through2-filter": "^3.0.0" } }, "universalify": { @@ -5380,58 +9864,16 @@ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" }, - "unset-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "update-browserslist-db": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.2.tgz", + "integrity": "sha512-PPypAm5qvlD7XMZC3BujecnaOxwhrtoFR+Dqkk5Aa/6DssiH0ibKoketaj9w8LP7Bont1rYeoV5plxD7RTEPRg==", "dev": true, "requires": { - "has-value": "^0.3.1", - "isobject": "^3.0.0" - }, - "dependencies": { - "has-value": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", - "dev": true, - "requires": { - "get-value": "^2.0.3", - "has-values": "^0.1.4", - "isobject": "^2.0.0" - }, - "dependencies": { - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dev": true, - "requires": { - "isarray": "1.0.0" - } - } - } - }, - "has-values": { - "version": "0.1.4", - "resolved": 
"https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", - "dev": true - }, - "isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "dev": true - } + "escalade": "^3.2.0", + "picocolors": "^1.1.1" } }, - "urix": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", - "dev": true - }, "url": { "version": "0.10.3", "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", @@ -5450,36 +9892,30 @@ } } }, - "use": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", - "dev": true - }, - "user-home": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/user-home/-/user-home-1.1.1.tgz", - "integrity": "sha1-K1viOjK2Onyd640PKNSFcko98ZA=", - "dev": true + "util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true }, "uuid": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", - "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" - }, - "v8flags": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/v8flags/-/v8flags-2.1.1.tgz", - "integrity": "sha1-qrGh+jDUX4jdMhFIh1rALAtV5bQ=", - "dev": true, - "requires": { - "user-home": "^1.1.1" - } + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==", + "dev": true }, "value-or-function": { "version": "3.0.0", @@ -5487,20 +9923,10 @@ "integrity": "sha1-HCQ6ULWVwb5Up1S/7OhWO5/42BM=", "dev": true }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, "vinyl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/vinyl/-/vinyl-2.2.0.tgz", - "integrity": "sha512-MBH+yP0kC/GQ5GwBqrTPTzEfiiLjta7hTtvQtbxBgTeSXsmKQRQecjibMbxIXzVT3Y9KJK+drOz1/k+vsu8Nkg==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/vinyl/-/vinyl-2.2.1.tgz", + "integrity": "sha512-LII3bXRFBZLlezoG5FfZVcXflZgWP/4dCwKtxd5ky9+LOtM4CS3bIRQsmR1KMnMW07jpE8fqR2lcxPZ+8sJIcw==", "dev": true, "requires": { "clone": "^2.1.1", @@ -5534,6 +9960,38 @@ "value-or-function": "^3.0.0", "vinyl": "^2.0.0", "vinyl-sourcemap": "^1.1.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + } } }, "vinyl-sourcemap": { @@ -5552,28 +10010,62 @@ } }, "walk": { - "version": "2.3.14", - "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.14.tgz", - "integrity": "sha512-5skcWAUmySj6hkBdH6B6+3ddMjVQYH5Qy9QGbPmN8kVmLteXk+yVXg+yfk1nbX30EYakahLrr8iPcCxJQSCBeg==", + "version": "2.3.15", + "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.15.tgz", + "integrity": "sha512-4eRTBZljBfIISK1Vnt69Gvr2w/wc3U6Vtrw7qiN5iqYJPH7LElcYh/iU4XWhdCy2dZqv1ToMyYlybDylfG/5Vg==", "dev": true, "requires": { "foreachasync": "^3.0.0" } }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=" + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha1-lmRU6HZUYuN2RNNib2dCzotwll0=", + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/which/-/which-5.0.0.tgz", + "integrity": "sha512-JEdGzHwwkrbWoGOlIHqQ5gtprKGOenpDHpxE9zVR1bWbOtYRyPPHMe9FaP6x61CmNaTThSkb0DAJte5jD+DmzQ==", "requires": { - "isexe": "^2.0.0" + "isexe": "^3.1.1" + }, + "dependencies": { + "isexe": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==" + } } }, - "wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==", + "dev": true + }, + "which-typed-array": { + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "dev": true, "requires": { - "string-width": "^1.0.2 || 2" + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" } }, "window-size": { @@ -5582,34 +10074,10 @@ "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", "dev": true }, - "winston": { - "version": "2.4.4", - "resolved": "https://registry.npmjs.org/winston/-/winston-2.4.4.tgz", - "integrity": "sha512-NBo2Pepn4hK4V01UfcWcDlmiVTs7VTB1h7bgnB0rgP146bYhMxX0ypCz3lBOfNxCO4Zuek7yeT+y/zM1OfMw4Q==", - "dev": true, - "optional": true, - "requires": { - "async": "~1.0.0", - "colors": "1.0.x", - "cycle": "1.0.x", - "eyes": "0.1.x", - "isstream": "0.1.x", - "stack-trace": "0.0.x" - }, - "dependencies": { - "async": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/async/-/async-1.0.0.tgz", - "integrity": "sha1-+PwEyjoTeErenhZBr5hXjPvWR6k=", - "dev": true, - "optional": true - } - } - }, - "wordwrap": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", - "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "workerpool": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", "dev": true }, "wrap-ansi": { @@ -5620,6 +10088,53 @@ "requires": { "string-width": "^1.0.1", "strip-ansi": "^3.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + } + } + }, + "wrap-ansi-cjs": { + "version": "npm:wrap-ansi@7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" } }, "wrappy": { @@ -5627,37 +10142,50 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": 
"sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, + "write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, "xml2js": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", - "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz", + "integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==", "dev": true, "requires": { "sax": ">=0.6.0", - "xmlbuilder": "~9.0.1" + "xmlbuilder": "~11.0.0" } }, "xmlbuilder": { - "version": "9.0.7", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=", + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", "dev": true }, "xtend": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", - "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=" + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true }, "y18n": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz", - "integrity": "sha1-bRX7qITAhnnA136I53WegR4H+kE=", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", + 
"integrity": "sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==", "dev": true }, "yallist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz", - "integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yargs": { "version": "3.29.0", @@ -5673,15 +10201,43 @@ "y18n": "^3.2.0" } }, - "yauzl": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.4.1.tgz", - "integrity": "sha1-lSj0QtqxsihOWLQ3m7GU4i4MQAU=", + "yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true + }, + "yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", "dev": true, - "optional": true, "requires": { - "fd-slicer": "~1.0.1" + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "dependencies": { + "camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true + }, + "decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true + } } + }, + "yocto-queue": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true } } } diff --git a/package.json b/package.json index b66f596195..41ccc32dc4 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "nodegit", "description": "Node.js libgit2 asynchronous native bindings", - "version": "0.24.0-alpha.1", + "version": "0.28.0-alpha.36", "homepage": "http://nodegit.org", "keywords": [ "libgit2", @@ -25,7 +25,7 @@ "email": "tylerw@axosoft.com" } ], - "main": "dist/nodegit.js", + "main": "lib/nodegit.js", "repository": { "type": "git", "url": "git://github.com/nodegit/nodegit.git" @@ -35,47 +35,37 @@ "lib": "./lib" }, "engines": { - "node": ">= 6" + "node": ">= 20" }, "dependencies": { + "@mapbox/node-pre-gyp": "^2.0.0", "fs-extra": "^7.0.0", - "lodash": "^4.17.11", - "nan": "^2.11.1", - "node-gyp": "^3.8.0", - "node-pre-gyp": "^0.11.0", - "promisify-node": "~0.3.0", - "ramda": "^0.25.0", - "request-promise-native": "^1.0.5", - "tar-fs": "^1.16.3" + "got": "^14.4.7", + "json5": "^2.1.0", + "lodash": "^4.17.14", + "nan": "^2.23.1", + "node-gyp": "^11.2.0", + "tar-fs": "^3.0.9" }, "devDependencies": { - "aws-sdk": "^2.326.0", - "babel-cli": "^6.7.7", - "babel-preset-es2015": "^6.6.0", - "cheerio": "^1.0.0-rc.2", + "aws-sdk": "^2.1095.0", "clean-for-publish": "~1.0.2", "combyne": "~0.8.1", - "coveralls": "^3.0.2", - "istanbul": "^0.4.5", "js-beautify": "~1.5.10", - "jshint": "^2.9.6", + "jshint": "^2.10.0", "lcov-result-merger": "^3.1.0", - "mocha": "^5.2.0", + "mocha": "^11.4.0", + "nyc": "^17.1.0", "walk": "^2.3.9" }, - "vendorDependencies": { - "libssh2": "1.8.0", - "http_parser": "2.5.0" - }, "binary": { + "bucket_name": "axonodegit", "module_name": "nodegit", "module_path": "./build/Release/", "host": "https://axonodegit.s3.amazonaws.com/nodegit/nodegit/" }, "scripts": { - "babel": "babel --presets es2015 -d ./dist ./lib", "cov": 
"npm run cppcov && npm run filtercov && npm run mergecov", - "coveralls": "cat ./test/coverage/merged.lcov | coveralls", "cppcov": "mkdir -p test/coverage/cpp && ./lcov-1.10/bin/lcov --gcov-tool /usr/bin/gcov-4.9 --capture --directory build/Release/obj.target/nodegit/src --output-file test/coverage/cpp/lcov_full.info", "filtercov": "./lcov-1.10/bin/lcov --extract test/coverage/cpp/lcov_full.info $(pwd)/src/* $(pwd)/src/**/* $(pwd)/include/* $(pwd)/include/**/* --output-file test/coverage/cpp/lcov.info && rm test/coverage/cpp/lcov_full.info", "generateJson": "node generate/scripts/generateJson", @@ -83,14 +73,13 @@ "generateNativeCode": "node generate/scripts/generateNativeCode", "install": "node lifecycleScripts/preinstall && node lifecycleScripts/install", "installDebug": "BUILD_DEBUG=true npm install", - "lint": "jshint lib test/tests test/utils examples lifecycleScripts", + "lint": "jshint lib test/tests test/utils lifecycleScripts", "mergecov": "lcov-result-merger 'test/**/*.info' 'test/coverage/merged.lcov' && ./lcov-1.10/bin/genhtml test/coverage/merged.lcov --output-directory test/coverage/report", "mocha": "mocha --expose-gc test/runner test/tests --timeout 15000", - "mochaDebug": "mocha --expose-gc --debug-brk test/runner test/tests --timeout 15000", + "mochaDebug": "mocha --expose-gc --inspect-brk test/runner test/tests --timeout 15000", "postinstall": "node lifecycleScripts/postinstall", - "prepublish": "npm run babel", - "rebuild": "node generate && npm run babel && node-gyp configure build", - "rebuildDebug": "node generate && npm run babel && node-gyp configure --debug build", + "rebuild": "node generate && node-gyp configure build", + "rebuildDebug": "node generate && node-gyp configure --debug build", "recompile": "node-gyp configure build", "recompileDebug": "node-gyp configure --debug build", "test": "npm run lint && node --expose-gc test", diff --git a/test/README.md b/test/README.md index e96c792177..e6bdccbc4d 100644 --- a/test/README.md +++ 
b/test/README.md @@ -17,5 +17,20 @@ Unit tests for NodeGit. Test utilities with garbage collector, index, and repository setup, that can be used in tests. -## Note -\*.enc are encrypted in base64 and unencrypted before the test suite runs as \*. +## Keys + +Note: all files are encoded in base64 in `\*.enc` and decoded before the test suite runs. + +### encrypted_rsa + - passphrase "test-password" + - registered as deploy key on [nodegit/test](https://github.com/nodegit/test) repo named "Encrypted test key" + +### id_rsa + - registered as deploy key on [nodegit/test](https://github.com/nodegit/test) repo named "Unencrypted Test Key" + +### private.ppk + - same key as id_rsa + - ppk format is used by putty/pageant and converted/generated by puttygen + +### nodegit-test-rsa + - registered as deploy key on [nodegit/private](https://github.com/nodegit/private) repo named "Tests" diff --git a/test/id_rsa.enc b/test/id_rsa.enc index cd8879bef7..8c992bebde 100644 --- a/test/id_rsa.enc +++ b/test/id_rsa.enc @@ -1 +1 @@ 
-LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBd1pCektEVVVaQjJEUk94SjBDM1JjVElGWEtob0RUMkE0YTFvaXpKV0xzNlpoSEZKCjRjRnQ4eldpR3VDZGlaUnBFVFlHR1ZZbXRTSE5WZDVLQ1R4TjJEdHZuc1FIeVFTRDNtK2p0YWd3QTNOb1JSUjgKM2R2TUpoeWxKRHV0Y3M2Tm1ZdzBIMmhGYll6ZC9XSXN0ZlFHM1hUZDI4emE0TDdrRStlK0gydWtZSVpka1gzbgpDbUR1KzRod05VUmx5QlhQSVhsMVNGTGxGZjR4NDIrZWxxcUJ4L0w4cmRRNmxNL1BhTW9oNnZBcEx0S3N5amZOCkFqb1htVDdIZ05STDA5TTVTSUcxd2NjSW96ZU1VWnJ5Snl0SHdlU25QYTk1SFhSOXJVKzFkUFd6aFNiTHhrR2sKSmtOVmhVd3FvNlFSY05RaHluRzN6eXpzV3k2VlNFZllNc2ROMFFJREFRQUJBb0lCQUJzWk5QWUJFRnkvd1B2cQpOSjgvZXQzbENka2gvb2MwQUJJWUs5V284MlhVS0t2aERGM2RyWjNwK1VyWC9WWWdmK0VYOWh5ZjhnVlR1U0ozClgxZ1JxRGhJZ2VUeFBzSEdyd3Q2QjZwTDVJVG5LRWJiaW11bzlOaTFFKzJScVVPMFpTQ0UvMXNTUnY0Q1JhWE8KazhIWmF3aWY3dHR4djRiTlVyTHlzNnhFYnB2UWxPTXpnczRzL09CQi9YTUVxbkZSR1BKZWVUeThia09XeVR3bApMajA2bnEyYnJzNHFLNGVpakkvTW9HeTFDRDhKQ3BMNGdHMzlHUFRYZDhHcHVkWG1kZWxEbjFFMHQ5bmhMNlNlCmFPTWFpUGh5N2tCSkQ0d1ovL1daVFNSMVh5ak5CSDNER2tOWnhQSVdjWCt3SkZ5Tm9MYlNiVlNkYS83RHR2cDMKQ1BmaU5oRUNnWUVBLyszSnN3U3pjVkVBTk5GNU9MWjc2eCtUT0RrWjlUNllGNFNSOC91SmpOVmlXZ1VwWDd2dwpteVhGKzJBd3pOYW90YkJLbU5HNjE5QmNVZU1tUUI3NmMrVWlNTGVKdUpjVC9KajB4bUVVb3BIb25HcUVJY3ZnCkhnNmNhZkUxaXM3ZCtsNjY5YmZqaXRseCszbXVGMkNZbnlsU04xTFdIeElJVFZVajNCbWNXcVVDZ1lFQXdaNDUKV2RhSGZLN0c2R2pJN2xpRFFUNFpsc2xBOGRtTHYySmwyRXhCQk1vWTNtM1NyZTQyOHoyWkZhNE8vbnNCWVAwYQpEeGdZbVgyMGZRR2NiUHVnS2RDWUhjN0hrS2JNVTFHd2lWQ0dwRFlaQ20yZ0pLVHZhbTNkWU5haUFmcTVEeWhQCnpEQ1pOSjVyclNNcHJYc3VSdjJPNGM1dThxdEo1QnlhT0pCak9yMENnWUJNbGtBeHprcFVzc1M1Q2FhWkRpTHYKTGJmRXIzSFJMallkYzVLcHpMQlE4TnBKemhtZmlJSnNLMVdmOEIwcWIySjFYSmcyT3kwS3dGT2dQYldJb3J5WQpTZzE5UHE5OENkbjFVV0NPclNhYnI4WklhS2U1NVdUZ0djYzgvTzNrNkJzTmZhTzlQSlpmU3NzTlVsQ0N0bWwxCjE4dSt1bzlSSlBoUERCZDdHajdyOFFLQmdGcmF4V3k3dDI0eGtaTURnSzRmaU0vM3RRaEZ2aHovQ1kyd1BieEcKNUFlOFVma21MY09DVWZUSVJlcWZkOWZBbnNBRlpOSUthNWl6SFJ1L3dzaDlOd1lJSlNsdm04UHNFVnRUclBSeQpmZ3ZXZXQraTI0LzJlWVpHc2FnOGIxOWdhTENOS1F6WERUMWN6WWc4Uk5Wc1JTWDQyN0JvTHpYZVhOa1c5dU51CkZiSTlBb0dBVjJreGNkY0tTNEJ0TkhLUGVHZ1Y4N2RNMERXaFF
hQXRFWEVJY1FxdUZ0YmEwbEFYaW9HSGc4VTQKemVpdWdsNFF6Y2h3azVxZDN3blo0U09oeDBzMTYvNWdRRGxua2JqRlI2RVJFVW52TFJ3VjkyekJYVVRPR0lraApaN1o0cmNnVUtsVkFhSFQzT0hOL2xUeXFKRy9pYitLNHdaaGJ6dGwvb3grSlVGc3ZEOTg9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== \ No newline at end of file +LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUNGd0FBQUFkemMyZ3RjbgpOaEFBQUFBd0VBQVFBQUFnRUFzRG5CRGV2dVZTNzFLVHg0OEdiRzlMeHp3UUJ1OVVYc25Qd3ROdGh3bHdpRmpYL1M5U0lKCnhwNENIdVBjS3JYdjJLcTI1NnRMby9jcENkZk9waFhjSWNpZ09RNzc1MGQwSDAvZkpyMHpzWjh3akFiZ1ZPOXpPanlRbXUKbE15WWNzWmpkMURESjQ2djBwNGU4YTZsbldwbHduVFNzVVB3OVVXRkt0MDRDcDZSNVpXaHdlTDg5cmI2Qk13SEJySURNWQpWY2VZSVlGZFRsZWU5MStGQWhLZVVFVE9LNzF6aWRPQXY0Ti9jdnRUQWNtYmlhTTgvUEtXVDEvRDFCeDJ6YzJsY0d3Y2RWCkJiaUxWQmNZKzJReGZzWjVlMGk5SGhKdjdCcWw4SXgxTTYzaVlkQU0yQUFseEZkTU1ONEMwUW1YeDlkSkxTdFdFcXRXZ2gKSHNGY05MSnpDTUs5MGRnSEh3OHZnOTFtdGNMVjVUS01nb3RXaEI2YjRMdkZEUUlac2V5RnY0cVJnM2NOb1hlUFY3bmg3OQp1YVlhL0NremJrNEdZQytsbXhENndFZDhHOGM4d0s4cjJ3NW04ZTAwWmdrQUVhbnovZGZRZ1dzdHBkajRmK05RdUVXRnN6CnNpeGlrTUtOVnJhWnpoRmFUaE9DbTNUUHNmY05LZ2NBN3VXVGJZNllNMlQ1SmowU3ZHRW5Ka1EzUitIK1RIamxqc0wrMnUKWVRCM2NlZ2dXc1dzSmlyazFTdjNlMURxUlhUeGNTcDlyRWlMaHVxV0NLL0t4QzZyWGpBMGJWSkgxMENnRjJNZWtVRTJtNApmeVZjUE5ML0VYWVN1V2t6elZzTDFjdzJSc3psS3RIQVJMNzRISlp6Z3RZMjZ2SlFLOTlub09QMk10aEc0dWI5aWRyajN1CmtBQUFkQVVoVnBRRklWYVVBQUFBQUhjM05vTFhKellRQUFBZ0VBc0RuQkRldnVWUzcxS1R4NDhHYkc5THh6d1FCdTlVWHMKblB3dE50aHdsd2lGalgvUzlTSUp4cDRDSHVQY0tyWHYyS3EyNTZ0TG8vY3BDZGZPcGhYY0ljaWdPUTc3NTBkMEgwL2ZKcgowenNaOHdqQWJnVk85ek9qeVFtdWxNeVljc1pqZDFEREo0NnYwcDRlOGE2bG5XcGx3blRTc1VQdzlVV0ZLdDA0Q3A2UjVaCldod2VMODlyYjZCTXdIQnJJRE1ZVmNlWUlZRmRUbGVlOTErRkFoS2VVRVRPSzcxemlkT0F2NE4vY3Z0VEFjbWJpYU04L1AKS1dUMS9EMUJ4MnpjMmxjR3djZFZCYmlMVkJjWSsyUXhmc1o1ZTBpOUhoSnY3QnFsOEl4MU02M2lZZEFNMkFBbHhGZE1NTgo0QzBRbVh4OWRKTFN0V0VxdFdnaEhzRmNOTEp6Q01LOTBkZ0hIdzh2ZzkxbXRjTFY1VEtNZ290V2hCNmI0THZGRFFJWnNlCnlGdjRxUmczY05vWGVQVjduaDc5dWFZYS9Da3piazRHWUMrbG14RDZ3RWQ4RzhjOHdLOHIydzVtO
GUwMFpna0FFYW56L2QKZlFnV3N0cGRqNGYrTlF1RVdGc3pzaXhpa01LTlZyYVp6aEZhVGhPQ20zVFBzZmNOS2djQTd1V1RiWTZZTTJUNUpqMFN2RwpFbkprUTNSK0grVEhqbGpzTCsydVlUQjNjZWdnV3NXc0ppcmsxU3YzZTFEcVJYVHhjU3A5ckVpTGh1cVdDSy9LeEM2clhqCkEwYlZKSDEwQ2dGMk1la1VFMm00ZnlWY1BOTC9FWFlTdVdrenpWc0wxY3cyUnN6bEt0SEFSTDc0SEpaemd0WTI2dkpRSzkKOW5vT1AyTXRoRzR1YjlpZHJqM3VrQUFBQURBUUFCQUFBQ0FGL2pUUlNTSitEWjZEUkQzMXFQMGZvaEFLc24zVGhBai9ycgpqSDZuVHJ3ZnV1dS9mYjQ4Z3kwN2xOUFNFRjU5R01EQVF6M1RpeGp3eDdlL1lZWWxwdDRMR0lOemo4WE1yM2JLTXhZVkpTCmVsQXZsdVZHcGkwRVFENkhzaUx0SUpaek5IUWIwZFNZWXpzckpwTkRBSUtpL2pQTTlVZlhQQ2w1Wm5ob1hySUlqa0pxSk4KWW0rSllXQWZ6U041Q0JGQlBDQ1F0a3FrNVd4WGFQd1pVWHBMUHpGVi93ajEwUVJSdldCMzRNVmowMHJKdElReitsOTRjQwpsSVpubm44dzBRdE5CelF4amlYS3dLVkUzQ2NONFpDbDFqd3EzQnljMDZHWTdtbnhRMlNYWFBMMERja0thNEptTGVMNUtuCmNyelJiRUllRWVEM3VoRnpVM0kwckVOUVJoNjY5SXByYWpmUnpMY0Z4bkM3M0JTMVJUeWNmcGRTR0ZPUFhULzJOZDM2MlgKU2VwNlZOeFN0NWE2d2tXZ0hKaDJIOGRQY25pREgxRG1yQWRQOTdBa2VpMWxtWHFiekNJVjlwaWQzbWZNQUpiSm00UmhRcApURHZldUU4TmlCZ0k2MlRrd3Vjb1cyeTNMZFN2MTM3aUpCbkpTYzExRzlBNWJHMnlRWEUyYWlYUXhIeW9UMlF0VmY1WklYClJhVkx1YlFuY1NnTEJqZ0NkMUNsakI1amxSWU10U2M4YkZhL1RKNW9YT1ZNdENYNHhhcVo3Z3JHS05CcUJsRFRXblRnWk0KbXZ3UFZ1Y2xlNzI4MzZPSlBYbzFMajNLM2ttcnhDMXVORzA3NjMvemJLOWl2QWF1SFRMMnQ0cHkra0k0NC9ZcERvR2sybgp4bFdNZEQzTDNKVXgzUXo5THhBQUFCQVFDVzh6YVI5T2VPMXAzWGkxNFg5MDNuOWNjY1JVcGRMOEZSUDZ5MWkrVFpmN2RSClpla3ltWDZGbU9qbTAvbm9XM0hwZVoyYmJEaUFQRFp1VzRmUW1nWjBnemxvZERDZUp2UHF2U1FzUWVISjRjMkdzVG4xVHMKMzNYU1RWeFVLd3dqbEZ2TGpvMkJBdmlBaGZ3YUN3UmxUaGlrRy9CdC8wbVhObTF6cHFZbnFBc1pXb3JqOVVWQTYyT1c4MQpDVVgxL2RVMWtjUkFpY2NsUWxqTlNEWGJ6aWJVN2pvdXpzQzNFVUVEcG1HZG1UUVh4WHNCNTFVTmh5eVZoMmdBVnFDMDAyCm1Kd2dFVXNqcHFzYUw2TTczYXo4Mzg4b0RwWGJwMUNLTER5aDh2SnBJTkgwak5OZEQxd1dvQVJRZUhzL05iUUZLUThJTkkKdlBzWnowWkdmRzcvdVMrSkFBQUJBUURiVG94anFOUHJGT1piWDkzeDZONTVJV05DVUt1cUwxVks2MURIYUtYZmlpd0hEeQpRYjEzUnhPREk2RlNXMElIeVpqMDh5ZjBTVElGOXNZTUFwNy9GS1FORElqVVZyMVI2Z0RFZ0F3K2N5L2dpeWowVWxxSE1zCmdUUnNnSmEvSjJQYnViRDRWMzdZUkQ4enB2a0tmOFNKMGJRalEwaUx0YUNVYm9BUDVmYWFYbElLdmUyeHpLdVgzT
0l2TTMKTyt1UTBJMDZqZGtMc2JBRzEvZ0E5emJmaW1wTHdJQkJkVUl6djdoRTJqOGJoak9HbTVTSU5rczRZZVROVFZXZHhmcjdiLwowVlFPSXJDd0RQKytCaGM5N2QrWDdZVVVkUUgzUHBTV3JWb0pOc0hNcHVUWmhpd0NnRk1NT1RYSEdWbGpFOHJnZGVTbFBzCmdCMXNRaHhyUlNNQitmQUFBQkFRRE50ZTQrMW5sWUtWNkVRWXhyeUpVQUtPdE1LSmZBNVhSVHNzWGhzRXlSMDBxL1djd3QKcmZmMzV3N2ZBWEJWd2VOemVlaXlwZXZKc1lnUnBBdTlPTVl0d1hFQlY1Rit5SUJRa2lHMTdiU2V6L3NibnlvaVdVNkJBWApHYmRDamZhNGVVRVRGemJjbGp0S2xnQUJSR2pXRDdQRk82V2ZwQWpRcGNqYVFwSVQ2WHpYZnVmV2d0bG5Ga1d5UGRXekpMCjQyV1lDemNhU3JKU0ZLZnpORHZtUjNzbllOZHB1bE1aUEtlRnZtZTJUWmp1VFJSRTd1OEtaRnhQalBkK0E4R2FuQnJOUGkKalBjSXE1SmFDWnpMMzVkaGlYcGJCQzJTMlh5cktwbWMrWEpRODJxZU93ZDZlOW9KVjEzUDdKU3NZYUVqdlFUeU5yNkE2bgo0ODIvcW1SZHUxUjNBQUFBQm01dmJtRnRaUUVDQXdRPQotLS0tLUVORCBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0K \ No newline at end of file diff --git a/test/id_rsa.pub b/test/id_rsa.pub index aed84e47b9..77f36c6534 100644 --- a/test/id_rsa.pub +++ b/test/id_rsa.pub @@ -1 +1 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBkHMoNRRkHYNE7EnQLdFxMgVcqGgNPYDhrWiLMlYuzpmEcUnhwW3zNaIa4J2JlGkRNgYZVia1Ic1V3koJPE3YO2+exAfJBIPeb6O1qDADc2hFFHzd28wmHKUkO61yzo2ZjDQfaEVtjN39Yiy19AbddN3bzNrgvuQT574fa6Rghl2RfecKYO77iHA1RGXIFc8heXVIUuUV/jHjb56WqoHH8vyt1DqUz89oyiHq8Cku0qzKN80COheZPseA1EvT0zlIgbXBxwijN4xRmvInK0fB5Kc9r3kddH2tT7V09bOFJsvGQaQmQ1WFTCqjpBFw1CHKcbfPLOxbLpVIR9gyx03R +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCwOcEN6+5VLvUpPHjwZsb0vHPBAG71Reyc/C022HCXCIWNf9L1IgnGngIe49wqte/Yqrbnq0uj9ykJ186mFdwhyKA5DvvnR3QfT98mvTOxnzCMBuBU73M6PJCa6UzJhyxmN3UMMnjq/Snh7xrqWdamXCdNKxQ/D1RYUq3TgKnpHllaHB4vz2tvoEzAcGsgMxhVx5ghgV1OV573X4UCEp5QRM4rvXOJ04C/g39y+1MByZuJozz88pZPX8PUHHbNzaVwbBx1UFuItUFxj7ZDF+xnl7SL0eEm/sGqXwjHUzreJh0AzYACXEV0ww3gLRCZfH10ktK1YSq1aCEewVw0snMIwr3R2AcfDy+D3Wa1wtXlMoyCi1aEHpvgu8UNAhmx7IW/ipGDdw2hd49XueHv25phr8KTNuTgZgL6WbEPrAR3wbxzzAryvbDmbx7TRmCQARqfP919CBay2l2Ph/41C4RYWzOyLGKQwo1WtpnOEVpOE4KbdM+x9w0qBwDu5ZNtjpgzZPkmPRK8YScmRDdH4f5MeOWOwv7a5hMHdx6CBaxawmKuTVK/d7UOpFdPFxKn2sSIuG6pYIr8rELqteMDRtUkfXQKAXYx6RQTabh/JVw80v8RdhK5aTPNWwvVzDZGzOUq0cBEvvgclnOC1jbq8lAr32eg4/Yy2Ebi5v2J2uPe6Q== noname diff --git a/test/index.js b/test/index.js index 5b3269f368..abd2fc2174 100644 --- a/test/index.js +++ b/test/index.js @@ -2,8 +2,13 @@ var fork = require("child_process").fork; var path = require("path"); var fs = require('fs'); -var bin = "./node_modules/.bin/istanbul"; -var cov = "cover --report=lcov --dir=test/coverage/js _mocha --".split(" "); +var bin = "./node_modules/.bin/nyc"; +var cov = [ + "--reporter=lcov", + "--reporter=text-summary", + "--report-dir=test/coverage/js", + "mocha" +] if (process.platform === 'win32') { bin = "./node_modules/mocha/bin/mocha"; @@ -18,7 +23,7 @@ var args = cov.concat([ "15000" ]); -if (!process.env.APPVEYOR && !process.env.TRAVIS) { +if (!process.env.APPVEYOR && !process.env.TRAVIS && !process.env.GITHUB_ACTION) { var local = path.join.bind(path, __dirname); var dummyPath = local("home"); process.env.HOME = dummyPath; @@ -35,6 +40,8 @@ function unencryptKey(fileName) { .toString('ascii'); fs.writeFileSync(path.join(__dirname, fileName), asciiContents, 'utf8'); } + +unencryptKey('private.ppk'); unencryptKey('id_rsa'); unencryptKey('nodegit-test-rsa'); diff --git a/test/private.ppk.enc b/test/private.ppk.enc new file mode 100644 index 0000000000..c388d98d86 --- /dev/null +++ b/test/private.ppk.enc @@ -0,0 +1,47 @@ 
+UHVUVFktVXNlci1LZXktRmlsZS0zOiBzc2gtcnNhCkVuY3J5cHRpb246IG5vbmUKQ29tbWVudDog +bm9uYW1lClB1YmxpYy1MaW5lczogMTIKQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQ0FRQ3dP +Y0VONis1Vkx2VXBQSGp3WnNiMHZIUEJBRzcxUmV5YwovQzAyMkhDWENJV05mOUwxSWduR25nSWU0 +OXdxdGUvWXFyYm5xMHVqOXlrSjE4Nm1GZHdoeUtBNUR2dm5SM1FmClQ5OG12VE94bnpDTUJ1QlU3 +M002UEpDYTZVekpoeXhtTjNVTU1uanEvU25oN3hycVdkYW1YQ2ROS3hRL0QxUlkKVXEzVGdLbnBI +bGxhSEI0dnoydHZvRXpBY0dzZ014aFZ4NWdoZ1YxT1Y1NzNYNFVDRXA1UVJNNHJ2WE9KMDRDLwpn +Mzl5KzFNQnladUpveno4OHBaUFg4UFVISGJOemFWd2JCeDFVRnVJdFVGeGo3WkRGK3hubDdTTDBl +RW0vc0dxClh3akhVenJlSmgwQXpZQUNYRVYwd3czZ0xSQ1pmSDEwa3RLMVlTcTFhQ0Vld1Z3MHNu +TUl3cjNSMkFjZkR5K0QKM1dhMXd0WGxNb3lDaTFhRUhwdmd1OFVOQWhteDdJVy9pcEdEZHcyaGQ0 +OVh1ZUh2MjVwaHI4S1ROdVRnWmdMNgpXYkVQckFSM3dieHp6QXJ5dmJEbWJ4N1RSbUNRQVJxZlA5 +MTlDQmF5MmwyUGgvNDFDNFJZV3pPeUxHS1F3bzFXCnRwbk9FVnBPRTRLYmRNK3g5dzBxQndEdTVa +TnRqcGd6WlBrbVBSSzhZU2NtUkRkSDRmNU1lT1dPd3Y3YTVoTUgKZHg2Q0JheGF3bUt1VFZLL2Q3 +VU9wRmRQRnhLbjJzU0l1RzZwWUlyOHJFTHF0ZU1EUnRVa2ZYUUtBWFl4NlJRVAphYmgvSlZ3ODB2 +OFJkaEs1YVRQTld3dlZ6RFpHek9VcTBjQkV2dmdjbG5PQzFqYnE4bEFyMzJlZzQvWXkyRWJpCjV2 +MkoydVBlNlE9PQpQcml2YXRlLUxpbmVzOiAyOApBQUFDQUYvalRSU1NKK0RaNkRSRDMxcVAwZm9o +QUtzbjNUaEFqL3Jyakg2blRyd2Z1dXUvZmI0OGd5MDdsTlBTCkVGNTlHTURBUXozVGl4and4N2Uv +WVlZbHB0NExHSU56ajhYTXIzYktNeFlWSlNlbEF2bHVWR3BpMEVRRDZIc2kKTHRJSlp6TkhRYjBk +U1lZenNySnBOREFJS2kvalBNOVVmWFBDbDVabmhvWHJJSWprSnFKTlltK0pZV0FmelNONQpDQkZC +UENDUXRrcWs1V3hYYVB3WlVYcExQekZWL3dqMTBRUlJ2V0IzNE1WajAwckp0SVF6K2w5NGNDbEla +bm5uCjh3MFF0TkJ6UXhqaVhLd0tWRTNDY040WkNsMWp3cTNCeWMwNkdZN21ueFEyU1hYUEwwRGNr +S2E0Sm1MZUw1S24KY3J6UmJFSWVFZUQzdWhGelUzSTByRU5RUmg2NjlJcHJhamZSekxjRnhuQzcz +QlMxUlR5Y2ZwZFNHRk9QWFQvMgpOZDM2MlhTZXA2Vk54U3Q1YTZ3a1dnSEpoMkg4ZFBjbmlESDFE +bXJBZFA5N0FrZWkxbG1YcWJ6Q0lWOXBpZDNtCmZNQUpiSm00UmhRcFREdmV1RThOaUJnSTYyVGt3 +dWNvVzJ5M0xkU3YxMzdpSkJuSlNjMTFHOUE1YkcyeVFYRTIKYWlYUXhIeW9UMlF0VmY1WklYUmFW +THViUW5jU2dMQmpnQ2QxQ2xqQjVqbFJZTXRTYzhiRmEvVEo1b1hPVk10QwpYNHhhcVo3Z3JHS05C 
+cUJsRFRXblRnWk1tdndQVnVjbGU3MjgzNk9KUFhvMUxqM0sza21yeEMxdU5HMDc2My96CmJLOWl2 +QWF1SFRMMnQ0cHkra0k0NC9ZcERvR2sybnhsV01kRDNMM0pVeDNRejlMeEFBQUJBUURiVG94anFO +UHIKRk9aYlg5M3g2TjU1SVdOQ1VLdXFMMVZLNjFESGFLWGZpaXdIRHlRYjEzUnhPREk2RlNXMElI +eVpqMDh5ZjBTVApJRjlzWU1BcDcvRktRTkRJalVWcjFSNmdERWdBdytjeS9naXlqMFVscUhNc2dU +UnNnSmEvSjJQYnViRDRWMzdZClJEOHpwdmtLZjhTSjBiUWpRMGlMdGFDVWJvQVA1ZmFhWGxJS3Zl +Mnh6S3VYM09Jdk0zTyt1UTBJMDZqZGtMc2IKQUcxL2dBOXpiZmltcEx3SUJCZFVJenY3aEUyajhi +aGpPR201U0lOa3M0WWVUTlRWV2R4ZnI3Yi8wVlFPSXJDdwpEUCsrQmhjOTdkK1g3WVVVZFFIM1Bw +U1dyVm9KTnNITXB1VFpoaXdDZ0ZNTU9UWEhHVmxqRThyZ2RlU2xQc2dCCjFzUWh4clJTTUIrZkFB +QUJBUUROdGU0KzFubFlLVjZFUVl4cnlKVUFLT3RNS0pmQTVYUlRzc1hoc0V5UjAwcS8KV2N3dHJm +ZjM1dzdmQVhCVndlTnplZWl5cGV2SnNZZ1JwQXU5T01ZdHdYRUJWNUYreUlCUWtpRzE3YlNlei9z +YgpueW9pV1U2QkFYR2JkQ2pmYTRlVUVURnpiY2xqdEtsZ0FCUkdqV0Q3UEZPNldmcEFqUXBjamFR +cElUNlh6WGZ1CmZXZ3RsbkZrV3lQZFd6Skw0MldZQ3pjYVNySlNGS2Z6TkR2bVIzc25ZTmRwdWxN +WlBLZUZ2bWUyVFpqdVRSUkUKN3U4S1pGeFBqUGQrQThHYW5Cck5QaWpQY0lxNUphQ1p6TDM1ZGhp +WHBiQkMyUzJYeXJLcG1jK1hKUTgycWVPdwpkNmU5b0pWMTNQN0pTc1lhRWp2UVR5TnI2QTZuNDgy +L3FtUmR1MVIzQUFBQkFRQ1c4emFSOU9lTzFwM1hpMTRYCjkwM245Y2NjUlVwZEw4RlJQNnkxaStU +WmY3ZFJaZWt5bVg2Rm1Pam0wL25vVzNIcGVaMmJiRGlBUERadVc0ZlEKbWdaMGd6bG9kRENlSnZQ +cXZTUXNRZUhKNGMyR3NUbjFUczMzWFNUVnhVS3d3amxGdkxqbzJCQXZpQWhmd2FDdwpSbFRoaWtH +L0J0LzBtWE5tMXpwcVlucUFzWldvcmo5VVZBNjJPVzgxQ1VYMS9kVTFrY1JBaWNjbFFsak5TRFhi +CnppYlU3am91enNDM0VVRURwbUdkbVRRWHhYc0I1MVVOaHl5VmgyZ0FWcUMwMDJtSndnRVVzanBx +c2FMNk03M2EKejgzODhvRHBYYnAxQ0tMRHloOHZKcElOSDBqTk5kRDF3V29BUlFlSHMvTmJRRktR +OElOSXZQc1p6MFpHZkc3Lwp1UytKClByaXZhdGUtTUFDOiBmMjY3ZTM0MzYwOTViZDc5OWYwNzQw +NDExZmJhMDM0YzZjOWNiN2VhYzk1ZDg4NDk3ZGVlYmMxNGZjZWQ0ZDU2Cg== diff --git a/test/runner.js b/test/runner.js index 89732a158a..3ca48486bd 100644 --- a/test/runner.js +++ b/test/runner.js @@ -5,20 +5,39 @@ var exec = require('../utils/execPromise'); var NodeGit = require('..'); -if(process.env.NODEGIT_TEST_THREADSAFETY) { - console.log('Enabling thread safety in 
NodeGit'); - NodeGit.enableThreadSafety(); -} else if (process.env.NODEGIT_TEST_THREADSAFETY_ASYNC) { - console.log('Enabling thread safety for async actions only in NodeGit'); - NodeGit.setThreadSafetyStatus(NodeGit.THREAD_SAFETY.ENABLED_FOR_ASYNC_ONLY); -} - var workdirPath = local("repos/workdir"); +var constWorkdirPath = local("repos/constworkdir"); + +const testRepos = [ + "repos/bare", + "repos/blameRepo", + "repos/cherrypick", + "repos/clone", + "repos/constworkdir", + "repos/convenientLineTest", + "repos/empty", + "repos/index", + "repos/index", + "repos/merge", + "repos/merge-head", + "repos/new", + "repos/newrepo", + "repos/nonrepo", + "repos/rebase", + "repos/renamedFileRepo", + "repos/revertRepo", + "repos/stagingRepo", + "repos/submodule", + "repos/submodule/nodegittest/", + "repos/tree/", + "repos/workdir", +]; before(function() { this.timeout(350000); - var url = "https://github.com/nodegit/test"; + var testUrl = "https://github.com/nodegit/test"; + var constTestUrl = "https://github.com/nodegit/test-frozen"; return fse.remove(local("repos")) .then(function() { fse.remove(local("home")) @@ -30,7 +49,17 @@ before(function() { return exec("git init " + local("repos", "empty")); }) .then(function() { - return exec("git clone " + url + " " + workdirPath); + return exec("git clone " + constTestUrl + " " + constWorkdirPath); + }) + .then(function() { + return exec("git clone " + testUrl + " " + workdirPath); + }) + .then(function() { + //to checkout the longpaths-checkout branch + if(process.platform === "win32") { + return exec("git config core.longpaths true", {cwd: workdirPath}); + } + return Promise.resolve(); }) .then(function() { return exec("git checkout rev-walk", {cwd: workdirPath}); @@ -38,6 +67,9 @@ before(function() { .then(function() { return exec("git checkout checkout-test", {cwd: workdirPath}); }) + .then(function() { + return exec("git checkout longpaths-checkout", {cwd: workdirPath}); + }) .then(function() { return exec("git checkout 
master", {cwd: workdirPath}); }) @@ -54,7 +86,13 @@ before(function() { .then(function() { return fse.writeFile(local("home", ".gitconfig"), "[user]\n name = John Doe\n email = johndoe@example.com"); - }); + }) + .then( async function() { + //mark all test repos as safe + for(let repo of testRepos) { + await exec(`git config --global --add safe.directory ${local(repo)}`); + } + }) }); beforeEach(function() { diff --git a/test/tests/blob.js b/test/tests/blob.js index 3635f6ba33..2d6512c86c 100644 --- a/test/tests/blob.js +++ b/test/tests/blob.js @@ -128,7 +128,7 @@ describe("Blob", function() { describe("createFromBuffer", function() { it("creates a new blob from the buffer", function() { var content = "This is a new buffer"; - var buf = new Buffer(content, content.length); + var buf = Buffer.from(content, content.length); var test = this; return Blob.createFromBuffer(test.repository, buf, content.length) @@ -142,7 +142,7 @@ describe("Blob", function() { it("creates blob with content equal to length", function() { var content = "This is a new buffer"; - var buf = new Buffer(content, content.length); + var buf = Buffer.from(content, content.length); var test = this; return Blob.createFromBuffer(test.repository, buf, 2) @@ -171,7 +171,7 @@ describe("Blob", function() { it("throws an error when no length is provided", function() { var test = this; - return Blob.createFromBuffer(test.repository, new Buffer("testing")) + return Blob.createFromBuffer(test.repository, Buffer.from("testing")) .catch(function(error) { assert.strictEqual(error.message, "Number len is required."); }); @@ -272,7 +272,7 @@ describe("Blob", function() { }); }); - describe("filteredContent", function() { + describe("filteredContent (DEPRECATED)", function() { var attrFileName = ".gitattributes"; var filter = "* text eol=crlf"; var lineEndingRegex = /\r\n|\r|\n/; @@ -368,7 +368,7 @@ describe("Blob", function() { it("returns nothing when checking binary blob", function() { var test = this; - var 
binary = new Buffer(new Uint8Array([1,2,3,4,5,6])); + var binary = Buffer.from(new Uint8Array([1,2,3,4,5,6])); return commitFile( test.repository, @@ -410,7 +410,7 @@ describe("Blob", function() { it("returns blob when not checking binary on binary blob", function() { var test = this; - var binary = new Buffer(new Uint8Array([1,2,3,4,5,6])); + var binary = Buffer.from(new Uint8Array([1,2,3,4,5,6])); return commitFile( test.repository, @@ -479,4 +479,184 @@ describe("Blob", function() { }); }); }); + + describe("filter", function() { + var attrFileName = ".gitattributes"; + var filter = "* text eol=crlf"; + var lineEndingRegex = /\r\n|\r|\n/; + var newFileName = "testfile.test"; + + it("retrieves the filtered content", function() { + var test = this; + + return commitFile( + test.repository, + attrFileName, + filter, + "added gitattributes") + .then(function() { + return commitFile( + test.repository, + newFileName, + "this\nis\nfun\guys", + "added LF ending file" + ); + }) + .then(function(oid) { + return test.repository.getCommit(oid); + }) + .then(function(commit) { + test.filteredCommit = commit; + return commit.getEntry(newFileName); + }) + .then(function(entry) { + return entry.getBlob(); + }) + .then(function(lfBlob) { + test.lfBlob = lfBlob; + var ending = test.lfBlob.toString().match(lineEndingRegex); + assert.strictEqual(ending[0], "\n"); + + return test.lfBlob.filter(newFileName, { flags: 0 }); + }) + .then(function(content) { + var ending = content.match(lineEndingRegex); + assert.strictEqual(ending[0], "\r\n"); + assert.notStrictEqual(content, test.blob.toString()); + }); + }); + + it("returns non-binary filtered content when checking binary", function() { + var test = this; + + return commitFile( + test.repository, + attrFileName, + filter, + "added gitattributes") + .then(function() { + return commitFile( + test.repository, + newFileName, + "this\nis\nfun\guys", + "added LF ending file" + ); + }) + .then(function(oid) { + return 
test.repository.getCommit(oid); + }) + .then(function(commit) { + test.filteredCommit = commit; + return commit.getEntry(newFileName); + }) + .then(function(entry) { + return entry.getBlob(); + }) + .then(function(lfBlob) { + test.lfBlob = lfBlob; + var ending = test.lfBlob.toString().match(lineEndingRegex); + assert.strictEqual(ending[0], "\n"); + + return test.lfBlob.filter( + newFileName, + { flags: NodeGit.Blob.FILTER_FLAG.CHECK_FOR_BINARY } + ); + }) + .then(function(content) { + var ending = content.match(lineEndingRegex); + assert.strictEqual(ending[0], "\r\n"); + assert.notStrictEqual(content, test.blob.toString()); + }); + }); + + it("returns nothing when checking binary blob", function() { + var test = this; + var binary = Buffer.from(new Uint8Array([1,2,3,4,5,6])); + + return commitFile( + test.repository, + attrFileName, + filter, + "added gitattributes") + .then(function() { + return commitFile( + test.repository, + newFileName, + binary, + "binary content" + ); + }) + .then(function(oid) { + return test.repository.getCommit(oid); + }) + .then(function(commit) { + test.filteredCommit = commit; + return commit.getEntry(newFileName); + }) + .then(function(entry) { + return entry.getBlob(); + }) + .then(function(binaryBlob) { + test.binaryBlob = binaryBlob; + assert.equal(true, binaryBlob.isBinary()); + + return test.binaryBlob.filter( + newFileName, + { flags: NodeGit.Blob.FILTER_FLAG.CHECK_FOR_BINARY } + ); + }) + .then(function(content) { + assert.strictEqual(content, ""); + }); + }); + + it("returns blob when not checking binary on binary blob", function() { + var test = this; + var binary = Buffer.from(new Uint8Array([1,2,3,4,5,6])); + + return commitFile( + test.repository, + attrFileName, + filter, + "added gitattributes") + .then(function() { + return commitFile( + test.repository, + newFileName, + binary, + "binary content" + ); + }) + .then(function(oid) { + return test.repository.getCommit(oid); + }) + .then(function(commit) { + 
test.filteredCommit = commit; + return commit.getEntry(newFileName); + }) + .then(function(entry) { + return entry.getBlob(); + }) + .then(function(binaryBlob) { + test.binaryBlob = binaryBlob; + assert.equal(true, binaryBlob.isBinary()); + + return test.binaryBlob.filter( + newFileName, + { flags: 0 } + ); + }) + .then(function(content) { + assert.strictEqual(content, binary.toString()); + }); + }); + + it("throws an error when the path is null", function() { + var test = this; + return test.blob.filter(test.blob, null, { flags: 0 }) + .catch(function(err) { + assert.strictEqual(err.message, "String as_path is required."); + }); + }); + }); }); diff --git a/test/tests/checkout.js b/test/tests/checkout.js index e3815a35f1..c10d50d3a7 100644 --- a/test/tests/checkout.js +++ b/test/tests/checkout.js @@ -14,6 +14,7 @@ describe("Checkout", function() { var readMePath = local("../repos/workdir/" + readMeName); var packageJsonPath = local("../repos/workdir/" + packageJsonName); var checkoutBranchName = "checkout-test"; + var longpathBranchName = "longpaths-checkout"; beforeEach(function() { var test = this; @@ -35,6 +36,52 @@ describe("Checkout", function() { }); }); + it("can checkout a branch with a long file path", function() { + var test = this; + + return (function () { + if(process.platform === "win32") { + return test.repository.config() + .then(function(config) { + return config.setBool("core.longpaths", true); + }); + } + + return Promise.resolve(); + })() + .then(function() { + return test.repository.checkoutBranch(longpathBranchName); + }) + .then(function() { + return test.repository.getStatus(); + }) + .then(function(statuses) { + assert.equal(statuses.length, 0); + }); + }); + + it("cannot checkout long path file if core.longpaths is not set on win32", function() { + var test = this; + + if (process.platform !== "win32") { + this.skip(); + } + + return test.repository.config() + .then(function(config) { + config.setBool("core.longpaths", false); + }) + 
.then(function () { + return test.repository.checkoutBranch(longpathBranchName); + }) + .then(function() { + assert.fail(); + }) + .catch(function(err) { + assert(~err.message.indexOf("path too long")); + }); + }); + it("can force checkout a single file", function() { var test = this; @@ -109,60 +156,63 @@ describe("Checkout", function() { }); it("can checkout an index with conflicts", function() { - var test = this; + const test = this; - var testBranchName = "test"; - var ourCommit; + const testBranchName = "test"; + let ourCommit; + let signature; - return test.repository.getBranchCommit(checkoutBranchName) - .then(function(commit) { + return test.repository.defaultSignature() + .then((signatureResult) => { + signature = signatureResult; + return test.repository.getBranchCommit(checkoutBranchName); + }) + .then((commit) => { ourCommit = commit; return test.repository.createBranch(testBranchName, commit.id()); }) - .then(function() { + .then(() => { return test.repository.checkoutBranch(testBranchName); }) - .then(function(branch) { + .then((branch) => { fse.writeFileSync(packageJsonPath, "\n"); return test.repository.refreshIndex() - .then(function(index) { + .then((index) => { return index.addByPath(packageJsonName) - .then(function() { + .then(() => { return index.write(); }) - .then(function() { + .then(() => { return index.writeTree(); }); }); }) - .then(function(oid) { + .then((oid) => { assert.equal(oid.toString(), "85135ab398976a4d5be6a8704297a45f2b1e7ab2"); - var signature = test.repository.defaultSignature(); - return test.repository.createCommit("refs/heads/" + testBranchName, signature, signature, "we made breaking changes", oid, [ourCommit]); }) - .then(function(commit) { + .then((commit) => { return Promise.all([ test.repository.getBranchCommit(testBranchName), test.repository.getBranchCommit("master") ]); }) - .then(function(commits) { + .then((commits) => { return NodeGit.Merge.commits(test.repository, commits[0], commits[1], null); }) - 
.then(function(index) { + .then((index) => { assert.ok(index); assert.ok(index.hasConflicts && index.hasConflicts()); return NodeGit.Checkout.index(test.repository, index); }) - .then(function() { + .then(() => { // Verify that the conflict has been written to disk var conflictedContent = fse.readFileSync(packageJsonPath, "utf-8"); @@ -178,7 +228,7 @@ describe("Checkout", function() { return Checkout.head(test.repository, opts); }) - .then(function() { + .then(() => { var finalContent = fse.readFileSync(packageJsonPath, "utf-8"); assert.equal(finalContent, "\n"); }); diff --git a/test/tests/clone.js b/test/tests/clone.js index 03a03ade9b..f256e85f51 100644 --- a/test/tests/clone.js +++ b/test/tests/clone.js @@ -4,12 +4,33 @@ var fse = require("fs-extra"); var local = path.join.bind(path, __dirname); var _ = require("lodash"); + +const generatePathWithLength = (base, length) => { + let path = `${base}/`; + const baseLength = path.length; + const remaining = length - baseLength; + + for (let i = 0; i < remaining; ++i) { + // add a slash every 240 characters, but not as first or last character + if (i % 239 == 0 && i != remaining - 1 && i != 0) { + path += "/"; + } else { + path += "a"; + } + } + + assert.ok(path.length === length); + + return path; +}; + describe("Clone", function() { var NodeGit = require("../../"); var Repository = NodeGit.Repository; var Clone = NodeGit.Clone; var clonePath = local("../repos/clone"); + var longClonePath = generatePathWithLength(clonePath, 600); var sshPublicKeyPath = local("../id_rsa.pub"); var sshPrivateKeyPath = local("../id_rsa"); @@ -20,11 +41,14 @@ describe("Clone", function() { this.timeout(30000); beforeEach(function() { - return fse.remove(clonePath).catch(function(err) { - console.log(err); - - throw err; - }); + return fse.remove(clonePath) + .then(function() { + return fse.remove(longClonePath); + }) + .catch(function(err) { + console.log(err); + throw err; + }); }); it.skip("can clone with http", function() { @@ -43,9 
+67,7 @@ describe("Clone", function() { var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } } }; @@ -202,9 +224,7 @@ describe("Clone", function() { var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } } }; @@ -221,11 +241,9 @@ describe("Clone", function() { var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - }, + certificateCheck: () => 0, credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); } } } @@ -243,11 +261,9 @@ describe("Clone", function() { var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - }, + certificateCheck: () => 0, credentials: function(url, userName) { - return NodeGit.Cred.sshKeyNew( + return NodeGit.Credential.sshKeyNew( userName, sshPublicKeyPath, sshPrivateKeyPath, @@ -269,11 +285,9 @@ describe("Clone", function() { var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - }, + certificateCheck: () => 0, credentials: function(url, userName) { - return NodeGit.Cred.sshKeyNew( + return NodeGit.Credential.sshKeyNew( userName, sshEncryptedPublicKeyPath, sshEncryptedPrivateKeyPath, @@ -290,15 +304,15 @@ describe("Clone", function() { }); }); - it("can clone with git", function() { + // Since 15 March the unauthenticated git protocol on port 9418 is no longer supported in Github. 
+ // https://github.blog/2021-09-01-improving-git-protocol-security-github/ + it.skip("can clone with git", function() { var test = this; var url = "git://github.com/nodegit/test.git"; var opts = { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } } }; @@ -328,16 +342,14 @@ describe("Clone", function() { return Clone(url, clonePath, { fetchOpts: { callbacks: { - certificateCheck: function() { - return 1; - }, + certificateCheck: () => 0, credentials: function() { if (firstPass) { firstPass = false; - return NodeGit.Cred.userpassPlaintextNew("fake-token", + return NodeGit.Credential.userpassPlaintextNew("fake-token", "x-oauth-basic"); } else { - return NodeGit.Cred.defaultNew(); + return NodeGit.Credential.defaultNew(); } } } diff --git a/test/tests/commit.js b/test/tests/commit.js index d6e6a5ce78..8ff9b8ea78 100644 --- a/test/tests/commit.js +++ b/test/tests/commit.js @@ -17,6 +17,7 @@ describe("Commit", function() { var Oid = NodeGit.Oid; var reposPath = local("../repos/workdir"); + var newRepoPath = local("../repos/new"); var oid = "fce88902e66c72b5b93e75bdb5ae717038b221f6"; function reinitialize(test) { @@ -127,6 +128,18 @@ describe("Commit", function() { assert.equal(this.commit.timeOffset(), 780); }); + it("can call getTree on a parent commit", function() { + return this.commit.parent(0) + .then(function(parent) { + return parent.getTree(); + }) + .then(function(tree) { + assert.equal( + tree.id().toString(), "327ff68e59f94f0c25d2c62fb0938efa01e8a107" + ); + }); + }); + it("can create a commit", function() { var test = this; var expectedCommitId = "315e77328ef596f3bc065d8ac6dd2c72c09de8a5"; @@ -273,7 +286,7 @@ describe("Commit", function() { it("can amend commit", function(){ var commitToAmendId = "315e77328ef596f3bc065d8ac6dd2c72c09de8a5"; - var expectedAmendedCommitId = "57836e96555243666ea74ea888310cc7c41d4613"; + var expectedAmendedCommitId = "a41de0d1c3dc169c873dd03bd9240d9f88e60ffc"; var fileName = 
"newfile.txt"; var fileContent = "hello world"; var newFileName = "newerfile.txt"; @@ -450,6 +463,242 @@ describe("Commit", function() { }); }); + describe("amendWithSignature", function() { + it("can amend with signature", function() { + const signedData = "-----BEGIN PGP SIGNATURE-----\n" + + "\n" + + "iQJHBAEBCAAxFiEEKdxGpJ93wnkLaBKfURjJKedOfEMFAlxPKUYTHHN0ZXZla0Bh\n" + + "eG9zb2Z0LmNvbQAKCRBRGMkp5058Q3vcD/0Uf6P68g98Kbvsgjg/aidM1ujruXaw\n" + + "X5WSsCAw+wWGICOj0n+KBnmQruI4HSFz3zykEshuOpcBv1X/+huwDeB/hBqonCU8\n" + + "QdexCdWR70YbT1bufesUwV9v1qwE4WOmFxWXgwh55K0wDRkc0u2aLcwrJkIEEVfs\n" + + "HqZyFzU4kwbGekY/m7d1DsBhWyKEGW9/25WMYmjWOWOiaFjeBaHLlxiEM8KGnMLH\n" + + "wx37NuFuaABgi23AAcBGdeWy04TEuU4S51+bHM3RotrZ2cryW2lEbkkXodhIJcq0\n" + + "RgrStCbvR0ehnOPdYSiRbxK8JNLZuNjHlK2g7wVi+C83vwMQuhU4H6OlYHGVr664\n" + + "4YzL83FdIo7wiMOFd2OOMLlCfHgTun60FvjCs4WHjrwH1fQl287FRPLa/4olBSQP\n" + + "yUXJaZdxm4cB4L/1pmbb/J/XUiOio3MpaN3GFm2hZloUlag1uPDBtCxTl5odvj4a\n" + + "GOmTBWznXxF/zrKnQVSvv+EccNxYFc0VVjAxGgNqPzIxDAKtw1lE5pbBkFpFpNHz\n" + + "StmwZkP9QIJY4hJYQfM+pzHLe8xjexL+Kh/TrYXgY1m/4vJe0HJSsnRnaR8Yfqhh\n" + + "LReqo94VHRYXR0rZQv4py0D9TrWaI8xHLve6ewhLPNRzyaI9fNrinbcPYZZOWnRi\n" + + "ekgUBx+BX6nJOw==\n" + + "=4Hy5\n" + + "-----END PGP SIGNATURE-----"; + + const onSignature = () => ({ + code: NodeGit.Error.CODE.OK, + field: "gpgsig", + signedData + }); + + var repo; + var oid; + var commit; + var message; + var parents; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + message = headCommit.message() + "\n"; + parents = headCommit.parents(); + + return headCommit.amendWithSignature( + null, + null, + null, + null, + null, + null, + onSignature + ); + }) + .then(function(oidResult) { + oid = oidResult; + return NodeGit.Commit.lookup(repo, oid); + }) + .then(function(commitResult) { + commit = commitResult; + return commit.getSignature("gpgsig"); + }) + 
.then(function(signatureInfo) { + assert.equal(signatureInfo.signature, signedData); + assert.equal(commit.message(), message); + assert.deepEqual(commit.parents(), parents); + }); + }); + + it("will respects overridden arguments", function() { + const signedData = "-----BEGIN PGP SIGNATURE-----\n" + + "\n" + + "iQJHBAEBCAAxFiEEKdxGpJ93wnkLaBKfURjJKedOfEMFAlxPKUYTHHN0ZXZla0Bh\n" + + "eG9zb2Z0LmNvbQAKCRBRGMkp5058Q3vcD/0Uf6P68g98Kbvsgjg/aidM1ujruXaw\n" + + "X5WSsCAw+wWGICOj0n+KBnmQruI4HSFz3zykEshuOpcBv1X/+huwDeB/hBqonCU8\n" + + "QdexCdWR70YbT1bufesUwV9v1qwE4WOmFxWXgwh55K0wDRkc0u2aLcwrJkIEEVfs\n" + + "HqZyFzU4kwbGekY/m7d1DsBhWyKEGW9/25WMYmjWOWOiaFjeBaHLlxiEM8KGnMLH\n" + + "wx37NuFuaABgi23AAcBGdeWy04TEuU4S51+bHM3RotrZ2cryW2lEbkkXodhIJcq0\n" + + "RgrStCbvR0ehnOPdYSiRbxK8JNLZuNjHlK2g7wVi+C83vwMQuhU4H6OlYHGVr664\n" + + "4YzL83FdIo7wiMOFd2OOMLlCfHgTun60FvjCs4WHjrwH1fQl287FRPLa/4olBSQP\n" + + "yUXJaZdxm4cB4L/1pmbb/J/XUiOio3MpaN3GFm2hZloUlag1uPDBtCxTl5odvj4a\n" + + "GOmTBWznXxF/zrKnQVSvv+EccNxYFc0VVjAxGgNqPzIxDAKtw1lE5pbBkFpFpNHz\n" + + "StmwZkP9QIJY4hJYQfM+pzHLe8xjexL+Kh/TrYXgY1m/4vJe0HJSsnRnaR8Yfqhh\n" + + "LReqo94VHRYXR0rZQv4py0D9TrWaI8xHLve6ewhLPNRzyaI9fNrinbcPYZZOWnRi\n" + + "ekgUBx+BX6nJOw==\n" + + "=4Hy5\n" + + "-----END PGP SIGNATURE-----"; + + const onSignature = () => ({ + code: NodeGit.Error.CODE.OK, + field: "gpgsig", + signedData + }); + + var repo; + var oid; + var commit; + var message; + var parents; + var commitTree; + + var author = NodeGit.Signature.create( + "Scooby Doo", + "scoob@mystery.com", + 123456789, + 60 + ); + var committer = NodeGit.Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + var tree = Oid.fromString("f4661419a6fbbe865f78644fec722c023ce4b65f"); + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + message = headCommit.message() + "\n"; + parents = headCommit.parents(); + + return 
headCommit.amendWithSignature( + null, + author, + committer, + null, + null, + tree, + onSignature + ); + }) + .then(function(oidResult) { + oid = oidResult; + return NodeGit.Commit.lookup(repo, oid); + }) + .then(function(commitResult) { + commit = commitResult; + return commit.getTree(); + }) + .then(function(commitTreeResult) { + commitTree = commitTreeResult; + return commit.getSignature("gpgsig"); + }) + .then(function(signatureInfo) { + assert.equal(signatureInfo.signature, signedData); + assert.equal(commit.message(), message); + assert.deepEqual(commit.parents(), parents); + assert.deepEqual(commitTree.id(), tree); + assert.deepEqual(commit.author(), author); + assert.deepEqual(commit.committer(), committer); + }); + }); + + it("can optionally skip signing process", function() { + const onSignature = () => ({ + code: NodeGit.Error.CODE.PASSTHROUGH + }); + + var repo; + var oid; + var commit; + var message; + var parents; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + message = headCommit.message(); + parents = headCommit.parents(); + + return headCommit.amendWithSignature( + null, + null, + null, + null, + null, + null, + onSignature + ); + }) + .then(function(oidResult) { + oid = oidResult; + return NodeGit.Commit.lookup(repo, oid); + }) + .then(function(commitResult) { + commit = commitResult; + return commit.getSignature("gpgsig") + .then(function() { + assert.fail("Should not have a signature"); + }, function(error) { + if (error && error.message === "this commit is not signed") { + return; + } + throw error; + }); + }) + .then(function(signatureInfo) { + assert.equal(commit.message(), message); + assert.deepEqual(commit.parents(), parents); + }); + }); + + it("will throw if signing callback returns an error code", function() { + const onSignature = () => ({ + code: NodeGit.Error.CODE.ERROR + }); + + return 
NodeGit.Repository.open(reposPath) + .then(function(repo) { + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + return headCommit.amendWithSignature( + null, + null, + null, + null, + null, + null, + onSignature + ); + }) + .then(function() { + assert.fail("amendWithSignature should have failed."); + }, function(error) { + if (error && error.errno === NodeGit.Error.CODE.ERROR) { + return; + } + throw error; + }); + }); + }); + it("has an owner", function() { var owner = this.commit.owner(); assert.ok(owner instanceof Repository); @@ -525,6 +774,16 @@ describe("Commit", function() { assert.equal(1, this.commit.parentcount()); }); + it("can fetch a single parent", function() { + return this.commit.parent(0).then(function(parent) { + assert.strictEqual(parent.sha(), + "ecfd36c80a3e9081f200dfda2391acadb56dac27"); + // This used to crash due to a missing .repo property on the retrieved + // parent. + return parent.getTree().then(tree => assert(tree)); + }); + }); + it("can retrieve and walk a commit tree", function() { var commitTreeEntryCount = 0; var expectedCommitTreeEntryCount = 198; @@ -791,6 +1050,354 @@ describe("Commit", function() { }); describe("Commit's Signature", function() { + it("Can create a signed commit in a repo", function() { + var signedData = "-----BEGIN PGP SIGNATURE-----\n" + + "Version: GnuPG v1.4.12 (Darwin)\n" + + "\n" + + "iQIcBAABAgAGBQJQ+FMIAAoJEH+LfPdZDSs1e3EQAJMjhqjWF+WkGLHju7pTw2al\n" + + "o6IoMAhv0Z/LHlWhzBd9e7JeCnanRt12bAU7yvYp9+Z+z+dbwqLwDoFp8LVuigl8\n" + + "JGLcnwiUW3rSvhjdCp9irdb4+bhKUnKUzSdsR2CK4/hC0N2i/HOvMYX+BRsvqweq\n" + + "AsAkA6dAWh+gAfedrBUkCTGhlNYoetjdakWqlGL1TiKAefEZrtA1TpPkGn92vbLq\n" + + "SphFRUY9hVn1ZBWrT3hEpvAIcZag3rTOiRVT1X1flj8B2vGCEr3RrcwOIZikpdaW\n" + + "who/X3xh/DGbI2RbuxmmJpxxP/8dsVchRJJzBwG+yhwU/iN3MlV2c5D69tls/Dok\n" + + "6VbyU4lm/ae0y3yR83D9dUlkycOnmmlBAHKIZ9qUts9X7mWJf0+yy2QxJVpjaTGG\n" + + "cmnQKKPeNIhGJk2ENnnnzjEve7L7YJQF6itbx5VCOcsGh3Ocb3YR7DMdWjt7f8pu\n" + + 
"c6j+q1rP7EpE2afUN/geSlp5i3x8aXZPDj67jImbVCE/Q1X9voCtyzGJH7MXR0N9\n" + + "ZpRF8yzveRfMH8bwAJjSOGAFF5XkcR/RNY95o+J+QcgBLdX48h+ZdNmUf6jqlu3J\n" + + "7KmTXXQcOVpN6dD3CmRFsbjq+x6RHwa8u1iGn+oIkX908r97ckfB/kHKH7ZdXIJc\n" + + "cpxtDQQMGYFpXK/71stq\n" + + "=ozeK\n" + + "-----END PGP SIGNATURE-----"; + + const onSignature = () => ({ + code: NodeGit.Error.CODE.OK, + field: "gpgsig", + signedData + }); + + var test = this; + var expectedCommitId = "ccb99bb20716ef7c37e92c7b8db029a7af7f747b"; + var fileName = "newfile.txt"; + var fileContent = "hello world"; + + var repo; + var index; + var treeOid; + var parent; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); + }) + .then(function() { + return repo.refreshIndex(); + }) + .then(function(indexResult) { + index = indexResult; + }) + .then(function() { + return index.addByPath(fileName); + }) + .then(function() { + return index.write(); + }) + .then(function() { + return index.writeTree(); + }) + .then(function(oidResult) { + treeOid = oidResult; + return NodeGit.Reference.nameToId(repo, "HEAD"); + }) + .then(function(head) { + return repo.getCommit(head); + }) + .then(function(parentResult) { + parent = parentResult; + return Promise.all([ + NodeGit.Signature.create("Foo Bar", "foo@bar.com", 123456789, 60), + NodeGit.Signature.create("Foo A Bar", "foo@bar.com", 987654321, 90) + ]); + }) + .then(function(signatures) { + var author = signatures[0]; + var committer = signatures[1]; + + return repo.createCommitWithSignature( + null, + author, + committer, + "message", + treeOid, + [parent], + onSignature + ); + }) + .then(function(commitId) { + assert.equal(expectedCommitId, commitId); + return NodeGit.Commit.lookup(repo, commitId); + }) + .then(function(commit) { + return commit.getSignature("gpgsig"); + }) + .then(function(signatureInfo) { + assert.equal(signedData, signatureInfo.signature); + return 
reinitialize(test); + }, function(reason) { + return reinitialize(test) + .then(function() { + return Promise.reject(reason); + }); + }); + }); + + it("Can create a signed commit in a repo and update existing ref", + function() { + var signedData = "-----BEGIN PGP SIGNATURE-----\n" + + "Version: GnuPG v1.4.12 (Darwin)\n" + + "\n" + + "iQIcBAABAgAGBQJQ+FMIAAoJEH+LfPdZDSs1e3EQAJMjhqjWF+WkGLHju7pTw2al\n" + + "o6IoMAhv0Z/LHlWhzBd9e7JeCnanRt12bAU7yvYp9+Z+z+dbwqLwDoFp8LVuigl8\n" + + "JGLcnwiUW3rSvhjdCp9irdb4+bhKUnKUzSdsR2CK4/hC0N2i/HOvMYX+BRsvqweq\n" + + "AsAkA6dAWh+gAfedrBUkCTGhlNYoetjdakWqlGL1TiKAefEZrtA1TpPkGn92vbLq\n" + + "SphFRUY9hVn1ZBWrT3hEpvAIcZag3rTOiRVT1X1flj8B2vGCEr3RrcwOIZikpdaW\n" + + "who/X3xh/DGbI2RbuxmmJpxxP/8dsVchRJJzBwG+yhwU/iN3MlV2c5D69tls/Dok\n" + + "6VbyU4lm/ae0y3yR83D9dUlkycOnmmlBAHKIZ9qUts9X7mWJf0+yy2QxJVpjaTGG\n" + + "cmnQKKPeNIhGJk2ENnnnzjEve7L7YJQF6itbx5VCOcsGh3Ocb3YR7DMdWjt7f8pu\n" + + "c6j+q1rP7EpE2afUN/geSlp5i3x8aXZPDj67jImbVCE/Q1X9voCtyzGJH7MXR0N9\n" + + "ZpRF8yzveRfMH8bwAJjSOGAFF5XkcR/RNY95o+J+QcgBLdX48h+ZdNmUf6jqlu3J\n" + + "7KmTXXQcOVpN6dD3CmRFsbjq+x6RHwa8u1iGn+oIkX908r97ckfB/kHKH7ZdXIJc\n" + + "cpxtDQQMGYFpXK/71stq\n" + + "=ozeK\n" + + "-----END PGP SIGNATURE-----"; + + const onSignature = () => ({ + code: NodeGit.Error.CODE.OK, + field: "gpgsig", + signedData + }); + + var test = this; + var expectedCommitId = "ccb99bb20716ef7c37e92c7b8db029a7af7f747b"; + var fileName = "newfile.txt"; + var fileContent = "hello world"; + + var repo; + var index; + var treeOid; + var parent; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); + }) + .then(function() { + return repo.refreshIndex(); + }) + .then(function(indexResult) { + index = indexResult; + }) + .then(function() { + return index.addByPath(fileName); + }) + .then(function() { + return index.write(); + }) + .then(function() { + return index.writeTree(); + }) + 
.then(function(oidResult) { + treeOid = oidResult; + return NodeGit.Reference.nameToId(repo, "HEAD"); + }) + .then(function(head) { + return repo.getCommit(head); + }) + .then(function(parentResult) { + parent = parentResult; + return Promise.all([ + NodeGit.Signature.create("Foo Bar", "foo@bar.com", 123456789, 60), + NodeGit.Signature.create("Foo A Bar", "foo@bar.com", 987654321, 90) + ]); + }) + .then(function(signatures) { + var author = signatures[0]; + var committer = signatures[1]; + + return repo.createCommitWithSignature( + "HEAD", + author, + committer, + "message", + treeOid, + [parent], + onSignature); + }) + .then(function(commitId) { + assert.equal(expectedCommitId, commitId); + return NodeGit.Commit.lookup(repo, commitId); + }) + .then(function(commit) { + return commit.getSignature("gpgsig"); + }) + .then(function(signatureInfo) { + assert.equal(signedData, signatureInfo.signature); + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + assert.equal(expectedCommitId, headCommit.id()); + return undoCommit() + .then(function(){ + return reinitialize(test); + }); + }, function(reason) { + return reinitialize(test) + .then(function() { + return Promise.reject(reason); + }); + }); + }); + + it("Can create a signed commit in bare a repo and update non-existent ref", + function() { + var signedData = "-----BEGIN PGP SIGNATURE-----\n" + + "\n" + + "iQIzBAABCAAdFiEEHYpzGBSIRCy6QrNr0R10kNTwiG8FAlxcuSoACgkQ0R10kNTw\n" + + "iG9sZA//Z6mrX5l//gjtn7Fy3Cg5khasNMZA15JUPzfoSyVkaYM7g/iZrJr4uZmm\n" + + "lrhqxTDP4SUEL6dMOT0fjAudulP19Stv0mUMOoQ9cfvU0DAuFlI1z2Ny9IR+3hJK\n" + + "XpIQCHZAAY9KrGajJvDO+WqukrMwKh2dwaQLgB2+cS7ehBpbW45+l+Bq4hTlULiJ\n" + + "ohZ2SQhqj65knErdbfJ2B7yVlQbfG2vbD6qN4qJOkJpkFRdDhLmGnNjUj+vcmYO2\n" + + "Be5CLyjuhYszzUqys6ix4UHr10KihFk31N17CgA2ZsDSzE3VsMCPlVPV9jWuMceJ\n" + + "0IFsJEXFR4SOlRAq23BxD7aaYao6AF/YBhCQnDiuiQLCJ7WdUAmja6VPyEajAjoX\n" + + "CkdDs1P4N9IeIPvJECn8Df4NEEkzW8sV3i96ryk066m1ZmZWemJ2zdGVbfR+AuFZ\n" + + 
"7QwgBRidj3thIk0geh9g10+pbRuTzxNXklqxq4DQb3VEXIIJMUcqtN1bUPEPiLyA\n" + + "SU3uJ1THyYznAVZy6aqw+mNq7Lg9gV65LRd0WtNqgneknDZoH3zXyzlcJexjHkRF\n" + + "qt4K6w9TDA2Erda3wE4BM4MCgl1Hc629kH3ROCyWTFuJAEZtNDJPgIc2LTRDhHNd\n" + + "+K937RhWU8lUnI2jJLmKdQDk2dnS1ZepFqA5Ynwza1qDSOgUqVw=\n" + + "=M81P\n" + + "-----END PGP SIGNATURE-----"; + + const onSignature = () => ({ + code: NodeGit.Error.CODE.OK, + field: "gpgsig", + signedData + }); + + var expectedCommitId = "ef11571eb3590007712c7ee3b4a11cd9c6094e30"; + var fileName = "newfile.txt"; + var fileContent = "hello world"; + + var repo; + var index; + var treeOid; + + return NodeGit.Repository.init(newRepoPath, 0) + .then(function(repoResult) { + repo = repoResult; + return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); + }) + .then(function() { + return repo.refreshIndex(); + }) + .then(function(indexResult) { + index = indexResult; + }) + .then(function() { + return index.addByPath(fileName); + }) + .then(function() { + return index.write(); + }) + .then(function() { + return index.writeTree(); + }) + .then(function(oidResult) { + treeOid = oidResult; + return Promise.all([ + NodeGit.Signature.create("Foo Bar", "foo@bar.com", 123456789, 60), + NodeGit.Signature.create("Foo A Bar", "foo@bar.com", 987654321, 90) + ]); + }) + .then(function(signatures) { + var author = signatures[0]; + var committer = signatures[1]; + + return repo.createCommitWithSignature( + "HEAD", + author, + committer, + "message", + treeOid, + [], + onSignature); + }) + .then(function(commitId) { + assert.equal(expectedCommitId, commitId); + return NodeGit.Commit.lookup(repo, commitId); + }) + .then(function(commit) { + return commit.getSignature("gpgsig"); + }) + .then(function(signatureInfo) { + assert.equal(signedData, signatureInfo.signature); + return repo.getHeadCommit(); + }) + .then(function(headCommit) { + assert.equal(expectedCommitId, headCommit.id()); + }); + }); + + it("Can create a signed commit raw", function() { + var 
expectedCommitId = "cc1401eaac4e9e77190e98a9353b305f0c6313d8"; + + var signature = "-----BEGIN PGP SIGNATURE-----\n\n" + + "iQEcBAABCAAGBQJarBhIAAoJEE8pfTd/81lKQA4IAL8Mu5kc4B/MX9s4XB26Ahap\n" + + "n06kCx3RQ1KHMZIRomAjCnb48WieNVuy1y+Ut0RgfCxxrJ1ZnzFG3kF2bIKwIxNI\n" + + "tYIC76iWny+mrVnb2mjKYjn/3F4c4VJGENq9ITiV1WeE4yJ8dHw2ox2D+hACzTvQ\n" + + "KVroedk8BDFJxS6DFb20To35xbAVhwBnAGRcII4Wi5PPMFpqAhGLfq3Czv95ddSz\n" + + "BHlyp27+YWSpV0Og0dqOEhsdDYaPrOBGRcoRiqjue+l5tgK/QerLFZ4aovZzpuEP\n" + + "Xx1yZfqXIiy4Bo40qScSrdnmnp/kMq/NQGR3jYU+SleFHVKNFsya9UwurMaezY0=\n" + + "=eZzi\n-----END PGP SIGNATURE-----"; + + var commit_content = "tree f4661419a6fbbe865f78644fec722c023ce4b65f\n" + + "parent 32789a79e71fbc9e04d3eff7425e1771eb595150\n" + + "author Tyler Ang-Wanek 1521227848 -0700\n" + + "committer Tyler Ang-Wanek 1521227848 -0700\n\n" + + "GPG Signed commit\n"; + + var repo; + var commit; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return Commit.createWithSignature( + repo, + commit_content, + signature, + "gpgsig"); + }) + .then(function(commitId) { + assert.equal(expectedCommitId, commitId); + return NodeGit.Commit.lookup(repo, commitId); + }) + .then(function(commitResult) { + commit = commitResult; + return commit.getSignature(); + }) + .then(function(signatureInfoDefault) { + assert.equal(signature, signatureInfoDefault.signature); + assert.equal(commit_content, signatureInfoDefault.signedData); + + return commit.getSignature("gpgsig"); + }) + .then(function(signatureInfo) { + assert.equal(signature, signatureInfo.signature); + assert.equal(commit_content, signatureInfo.signedData); + }); + }); + it("Can retrieve the gpg signature from a commit", function() { var expectedSignedData = "tree f4661419a6fbbe865f78644fec722c023ce4b65f\n" + @@ -827,5 +1434,173 @@ describe("Commit", function() { ); }); }); + + it("Can be optionally skipped to create without signature", function() { + const onSignature = () => ({ + code: 
NodeGit.Error.CODE.PASSTHROUGH + }); + + var test = this; + var expectedCommitId = "c9bffe040519231d32431c101bca4efc0917f64c"; + var fileName = "newfile.txt"; + var fileContent = "hello world"; + + var repo; + var index; + var treeOid; + var parent; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); + }) + .then(function() { + return repo.refreshIndex(); + }) + .then(function(indexResult) { + index = indexResult; + }) + .then(function() { + return index.addByPath(fileName); + }) + .then(function() { + return index.write(); + }) + .then(function() { + return index.writeTree(); + }) + .then(function(oidResult) { + treeOid = oidResult; + return NodeGit.Reference.nameToId(repo, "HEAD"); + }) + .then(function(head) { + return repo.getCommit(head); + }) + .then(function(parentResult) { + parent = parentResult; + return Promise.all([ + NodeGit.Signature.create("Foo Bar", "foo@bar.com", 123456789, 60), + NodeGit.Signature.create("Foo A Bar", "foo@bar.com", 987654321, 90) + ]); + }) + .then(function(signatures) { + var author = signatures[0]; + var committer = signatures[1]; + + return repo.createCommitWithSignature( + null, + author, + committer, + "message", + treeOid, + [parent], + onSignature + ); + }) + .then(function(commitId) { + assert.equal(expectedCommitId, commitId); + return NodeGit.Commit.lookup(repo, commitId); + }) + .then(function(commit) { + return commit.getSignature("gpgsig") + .then(function() { + assert.fail("Should not have been able to retrieve gpgsig"); + }, function(error) { + if (error && error.message === "this commit is not signed") { + return; + } + throw error; + }); + }) + .then(function() { + return reinitialize(test); + }, function(reason) { + return reinitialize(test) + .then(function() { + return Promise.reject(reason); + }); + }); + }); + + it("Will throw if the signing cb returns an error code", function() { + const 
onSignature = () => ({ + code: NodeGit.Error.CODE.ERROR + }); + + var test = this; + var fileName = "newfile.txt"; + var fileContent = "hello world"; + + var repo; + var index; + var treeOid; + var parent; + + return NodeGit.Repository.open(reposPath) + .then(function(repoResult) { + repo = repoResult; + return fse.writeFile(path.join(repo.workdir(), fileName), fileContent); + }) + .then(function() { + return repo.refreshIndex(); + }) + .then(function(indexResult) { + index = indexResult; + }) + .then(function() { + return index.addByPath(fileName); + }) + .then(function() { + return index.write(); + }) + .then(function() { + return index.writeTree(); + }) + .then(function(oidResult) { + treeOid = oidResult; + return NodeGit.Reference.nameToId(repo, "HEAD"); + }) + .then(function(head) { + return repo.getCommit(head); + }) + .then(function(parentResult) { + parent = parentResult; + return Promise.all([ + NodeGit.Signature.create("Foo Bar", "foo@bar.com", 123456789, 60), + NodeGit.Signature.create("Foo A Bar", "foo@bar.com", 987654321, 90) + ]); + }) + .then(function(signatures) { + var author = signatures[0]; + var committer = signatures[1]; + + return repo.createCommitWithSignature( + null, + author, + committer, + "message", + treeOid, + [parent], + onSignature + ); + }) + .then(function() { + assert.fail("createCommitWithSignature should have failed."); + }, function(error) { + if (error && error.errno === NodeGit.Error.CODE.ERROR) { + return; + } + throw error; + }) + .then(function() { + return reinitialize(test); + }, function(reason) { + return reinitialize(test) + .then(function() { + return Promise.reject(reason); + }); + }); + }); }); }); diff --git a/test/tests/cred.js b/test/tests/cred.js index 6f0bb46ad4..eee98d69bd 100644 --- a/test/tests/cred.js +++ b/test/tests/cred.js @@ -5,23 +5,23 @@ var local = path.join.bind(path, __dirname); describe("Cred", function() { var NodeGit = require("../../"); - + var sshPublicKey = local("../id_rsa.pub"); var 
sshPrivateKey = local("../id_rsa"); it("can create default credentials", function() { - var defaultCreds = NodeGit.Cred.defaultNew(); - assert.ok(defaultCreds instanceof NodeGit.Cred); + var defaultCreds = NodeGit.Credential.defaultNew(); + assert.ok(defaultCreds instanceof NodeGit.Credential); }); it("can create ssh credentials using passed keys", function() { - var cred = NodeGit.Cred.sshKeyNew( + var cred = NodeGit.Credential.sshKeyNew( "username", sshPublicKey, sshPrivateKey, ""); - assert.ok(cred instanceof NodeGit.Cred); + assert.ok(cred instanceof NodeGit.Credential); }); it("can create ssh credentials using passed keys in memory", function() { @@ -32,42 +32,42 @@ describe("Cred", function() { encoding: "ascii" }); - return NodeGit.Cred.sshKeyMemoryNew( + return NodeGit.Credential.sshKeyMemoryNew( "username", publicKeyContents, privateKeyContents, "").then(function(cred) { - assert.ok(cred instanceof NodeGit.Cred); + assert.ok(cred instanceof NodeGit.Credential); }); }); it("can create credentials using plaintext", function() { - var plaintextCreds = NodeGit.Cred.userpassPlaintextNew + var plaintextCreds = NodeGit.Credential.userpassPlaintextNew ("username", "password"); - assert.ok(plaintextCreds instanceof NodeGit.Cred); + assert.ok(plaintextCreds instanceof NodeGit.Credential); }); - + it("can create credentials using agent", function() { - var fromAgentCreds = NodeGit.Cred.sshKeyFromAgent + var fromAgentCreds = NodeGit.Credential.sshKeyFromAgent ("username"); - assert.ok(fromAgentCreds instanceof NodeGit.Cred); + assert.ok(fromAgentCreds instanceof NodeGit.Credential); }); it("can create credentials using username", function() { - return NodeGit.Cred.usernameNew + return NodeGit.Credential.usernameNew ("username").then(function(cred) { - assert.ok(cred instanceof NodeGit.Cred); + assert.ok(cred instanceof NodeGit.Credential); }); }); it("can return 1 if a username exists", function() { - var plaintextCreds = NodeGit.Cred.userpassPlaintextNew + var 
plaintextCreds = NodeGit.Credential.userpassPlaintextNew ("username", "password"); assert.ok(plaintextCreds.hasUsername() === 1); }); it("can return 0 if a username does not exist", function() { - var defaultCreds = NodeGit.Cred.defaultNew(); + var defaultCreds = NodeGit.Credential.defaultNew(); assert.ok(defaultCreds.hasUsername() === 0); }); }); diff --git a/test/tests/diff.js b/test/tests/diff.js index d9dfec7bce..9fd483c388 100644 --- a/test/tests/diff.js +++ b/test/tests/diff.js @@ -112,7 +112,7 @@ describe("Diff", function() { }); }); - it("can walk a DiffList", function() { + it("can walk an Array", function() { return this.diff[0].patches() .then(function(patches) { var patch = patches[0]; @@ -246,7 +246,7 @@ describe("Diff", function() { it("can diff the contents of a file to a string with unicode characters", function(done) { var evilString = "Unicode’s fun!\nAnd it’s good for you!\n"; - var buffer = new Buffer(evilString); + var buffer = Buffer.from(evilString); var test = this; Blob.createFromBuffer(test.repository, buffer, buffer.length) .then(function(oid) { @@ -414,7 +414,7 @@ describe("Diff", function() { }) .then(function([headTree, index]) { const diffOptions = new NodeGit.DiffOptions(); - if (index.caps() & Index.CAP.IGNORE_CASE !== 0) { + if (index.caps() & Index.CAPABILITY.IGNORE_CASE !== 0) { diffOptions.flags |= Diff.OPTION.IGNORE_CASE; } diff --git a/test/tests/filter.js b/test/tests/filter.js index 3a57acbefb..06e138ba5d 100644 --- a/test/tests/filter.js +++ b/test/tests/filter.js @@ -2,6 +2,7 @@ var assert = require("assert"); var fse = require("fs-extra"); var path = require("path"); var local = path.join.bind(path, __dirname); +var garbageCollect = require("../utils/garbage_collect.js"); describe("Filter", function() { var NodeGit = require("../../"); @@ -166,228 +167,6 @@ describe("Filter", function() { }); }); - describe("Initialize", function(){ - it("initializes successfully", function() { - var test = this; - var initialized = 
false; - return Registry.register(filterName, { - initialize: function() { - initialized = true; - return NodeGit.Error.CODE.OK; - }, - apply: function() {}, - check: function() { - return NodeGit.Error.CODE.PASSTHROUGH; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - }) - .then(function() { - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout" - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - assert.strictEqual(initialized, true); - }); - }); - - it("initializes successfully even on garbage collect", function() { - var test = this; - var initialized = false; - return Registry.register(filterName, { - initialize: function() { - initialized = true; - return NodeGit.Error.CODE.OK; - }, - apply: function() {}, - check: function() { - return NodeGit.Error.CODE.PASSTHROUGH; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - global.gc(); - - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout" - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - assert.strictEqual(initialized, true); - }); - }); - - it("does not initialize successfully", function() { - var test = this; - var initialized = false; - return Registry.register(filterName, { - initialize: function() { - initialized = true; - return NodeGit.Error.CODE.ERROR; - }, - apply: function() {}, - check: function() { - return NodeGit.Error.CODE.PASSTHROUGH; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - }) - .then(function() { - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout" - ); - }) - .then(function() { - var 
opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function(head) { - assert.fail(head, undefined, "Should not have actually checked out"); - }) - .catch(function(error) { - assert.strictEqual(initialized, true); - }); - }); - }); - - describe("Shutdown", function() { - it("filter successfully shuts down", function() { - var test = this; - var shutdown = false; - return Registry.register(filterName, { - apply: function() {}, - check: function(){ - return NodeGit.Error.CODE.PASSTHROUGH; - }, - shutdown: function(){ - shutdown = true; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - return Registry.unregister(filterName); - }) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - assert.strictEqual(shutdown, true); - }); - }); - - it("filter successfully shuts down on garbage collect", function() { - var test = this; - var shutdown = false; - return Registry.register(filterName, { - apply: function() {}, - check: function(){ - return NodeGit.Error.CODE.PASSTHROUGH; - }, - shutdown: function(){ - shutdown = true; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - global.gc(); - return Registry.unregister(filterName); - }) - .then(function(result) { - 
assert.strictEqual(result, NodeGit.Error.CODE.OK); - assert.strictEqual(shutdown, true); - }); - }); - - it("shutdown completes even if there is an error", function() { - var test = this; - var shutdown = false; - return Registry.register(filterName, { - apply: function() {}, - check: function(){ - return NodeGit.Error.CODE.PASSTHROUGH; - }, - shutdown: function(){ - shutdown = true; - throw new Error("I failed"); - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - return Registry.unregister(filterName); - }) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - assert.strictEqual(shutdown, true); - }) - .catch(function(error) { - assert.fail(error, null, "The operation should not have failed"); - }); - }); - }); - describe("Apply", function() { before(function() { var test = this; @@ -404,7 +183,7 @@ describe("Filter", function() { var message = "some new fancy filter"; var length = message.length; - var tempBuffer = new Buffer(message, "utf-8"); + var tempBuffer = Buffer.from(message, "utf-8"); var largeBufferSize = 500000000; it("should not apply when check returns GIT_PASSTHROUGH", function(){ @@ -441,7 +220,7 @@ describe("Filter", function() { it("should apply filter when check succeeds", function() { var test = this; - var applied = true; + var applied = false; return Registry.register(filterName, { apply: function() { @@ -476,10 +255,8 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.PASSTHROUGH; - }); + to.set(tempBuffer, length); + return 
NodeGit.Error.CODE.PASSTHROUGH; }, check: function() { return NodeGit.Error.CODE.OK; @@ -522,10 +299,8 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -558,6 +333,82 @@ describe("Filter", function() { }); }); + it("can run sync callback on checkout without deadlocking", function() { // jshint ignore:line + var test = this; + var syncCallbackResult = 1; + + return Registry.register(filterName, { + apply: function() { + syncCallbackResult = test.repository.isEmpty(); + }, + check: function() { + return NodeGit.Error.CODE.OK; + } + }, 0) + .then(function(result) { + assert.strictEqual(result, NodeGit.Error.CODE.OK); + return fse.writeFile( + packageJsonPath, + "Changing content to trigger checkout", + { encoding: "utf-8" } + ); + }) + .then(function() { + var opts = { + checkoutStrategy: Checkout.STRATEGY.FORCE, + paths: "package.json" + }; + return Checkout.head(test.repository, opts); + }) + .then(function() { + assert.strictEqual(syncCallbackResult, 0); + }); + }); + + // Temporary workaround for LFS checkout. Test skipped. + // To activate when reverting workaround. + // 'Checkout.head' and 'Submodule.lookup' do work with the repo locked. + // They should work together without deadlocking. 
+ it.skip("can run async callback on checkout without deadlocking", function() { // jshint ignore:line + var test = this; + var submoduleNameIn = "vendor/libgit2"; + var asyncCallbackResult = ""; + + return Registry.register(filterName, { + apply: function() { + return NodeGit.Submodule.lookup(test.repository, submoduleNameIn) + .then(function(submodule) { + return submodule.name(); + }) + .then(function(name) { + asyncCallbackResult = name; + return NodeGit.Error.CODE.OK; + }); + }, + check: function() { + return NodeGit.Error.CODE.OK; + } + }, 0) + .then(function(result) { + assert.strictEqual(result, NodeGit.Error.CODE.OK); + return fse.writeFile( + packageJsonPath, + "Changing content to trigger checkout", + { encoding: "utf-8" } + ); + }) + .then(function() { + var opts = { + checkoutStrategy: Checkout.STRATEGY.FORCE, + paths: "package.json" + }; + return Checkout.head(test.repository, opts); + }) + .then(function() { + assert.equal(asyncCallbackResult, submoduleNameIn); + }); + }); + // this test is useless on 32 bit CI, because we cannot construct // a buffer big enough to test anything of significance :)... 
if (process.arch === "x64") { @@ -568,10 +419,8 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(largeBuffer, largeBufferSize) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(largeBuffer, largeBufferSize); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -626,10 +475,8 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -645,7 +492,7 @@ describe("Filter", function() { ); assert.notStrictEqual(readmeContent, message); fse.writeFileSync(readmePath, "whoa", "utf8"); - global.gc(); + garbageCollect(); var opts = { checkoutStrategy: Checkout.STRATEGY.FORCE, @@ -668,16 +515,13 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return src.path() === "README.md" ? 0 : NodeGit.Error.CODE.PASSTHROUGH; - }, - cleanup: function() {} + } }, 0) .then(function(result) { assert.strictEqual(result, NodeGit.Error.CODE.OK); @@ -725,19 +569,16 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return src.path() === "README.md" ? 
0 : NodeGit.Error.CODE.PASSTHROUGH; - }, - cleanup: function() {} + } }, 0) .then(function(result) { - global.gc(); + garbageCollect(); assert.strictEqual(result, NodeGit.Error.CODE.OK); }) .then(function() { @@ -754,7 +595,7 @@ describe("Filter", function() { ); }) .then(function(oid) { - global.gc(); + garbageCollect(); return test.repository.getHeadCommit(); }) .then(function(commit) { @@ -767,7 +608,7 @@ describe("Filter", function() { postInitializeReadmeContents, "testing commit contents" ); assert.strictEqual(commit.message(), "test commit"); - global.gc(); + garbageCollect(); return commit.getEntry("README.md"); }) @@ -781,152 +622,6 @@ describe("Filter", function() { }); }); - describe("Cleanup", function() { - it("is called successfully", function() { - var test = this; - var cleaned = false; - return Registry.register(filterName, { - initialize: function() { - return NodeGit.Error.CODE.OK; - }, - apply: function() { - return NodeGit.Error.CODE.OK; - }, - check: function() { - return NodeGit.Error.CODE.OK; - }, - cleanup: function() { - cleaned = true; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - }) - .then(function() { - var packageContent = fse.readFileSync( - packageJsonPath, - "utf-8" - ); - assert.notEqual(packageContent, ""); - - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - assert.strictEqual(cleaned, true); - }); - }); - - it("is called successfully with gc", function() { - var test = this; - var cleaned = false; - return Registry.register(filterName, { - initialize: function() { - return NodeGit.Error.CODE.OK; - }, - apply: function() { - return NodeGit.Error.CODE.OK; - }, - check: function() { - return NodeGit.Error.CODE.OK; - }, - cleanup: 
function() { - cleaned = true; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - }) - .then(function() { - var packageContent = fse.readFileSync( - packageJsonPath, - "utf-8" - ); - assert.notEqual(packageContent, ""); - - global.gc(); - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "package.json" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - assert.strictEqual(cleaned, true); - }); - }); - - it("is not called when check returns GIT_PASSTHROUGH", function() { - var test = this; - var cleaned = false; - - return Registry.register(filterName, { - initialize: function() { - return NodeGit.Error.CODE.OK; - }, - apply: function() { - return NodeGit.Error.CODE.OK; - }, - check: function() { - return NodeGit.Error.CODE.PASSTHROUGH; - }, - cleanup: function() { - cleaned = true; - } - }, 0) - .then(function(result) { - assert.strictEqual(result, NodeGit.Error.CODE.OK); - }) - .then(function() { - var packageContent = fse.readFileSync( - packageJsonPath, - "utf-8" - ); - var readmeContent = fse.readFileSync( - readmePath, - "utf-8" - ); - - assert.notEqual(packageContent, ""); - assert.notEqual(readmeContent, "Initialized"); - }) - .then(function() { - return fse.writeFile( - packageJsonPath, - "Changing content to trigger checkout", - { encoding: "utf-8" } - ); - }) - .then(function() { - var opts = { - checkoutStrategy: Checkout.STRATEGY.FORCE, - paths: "README.md" - }; - return Checkout.head(test.repository, opts); - }) - .then(function() { - assert.notStrictEqual(cleaned, true); - }); - }); - }); - describe("Manually Apply", function() { beforeEach(function() { var test = this; @@ -948,18 +643,15 @@ describe("Filter", function() { var message = "This is the filtered content, friends"; var length = message.length; - var tempBuffer = new 
Buffer(message, "utf-8"); + var tempBuffer = Buffer.from(message, "utf-8"); it("applies the filters for a path on demand", function() { var test = this; - var list; return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -984,8 +676,7 @@ describe("Filter", function() { NodeGit.Filter.FLAG.DEFAULT ); }) - .then(function(_list) { - list = _list; + .then(function(list) { return list.applyToFile(test.repository, "README.md"); }) .then(function(content) { @@ -995,14 +686,11 @@ describe("Filter", function() { it("applies the filters to a buffer on demand", function() { var test = this; - var list; return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -1027,8 +715,7 @@ describe("Filter", function() { NodeGit.Filter.FLAG.DEFAULT ); }) - .then(function(_list) { - list = _list; + .then(function(list) { /* jshint ignore:start */ return list.applyToData(new String("garbo garbo garbo garbo")); /* jshint ignore:end */ @@ -1044,10 +731,8 @@ describe("Filter", function() { return Registry.register(filterName, { apply: function(to, from, source) { - return to.set(tempBuffer, length) - .then(function() { - return NodeGit.Error.CODE.OK; - }); + to.set(tempBuffer, length); + return NodeGit.Error.CODE.OK; }, check: function(src, attr) { return NodeGit.Error.CODE.OK; @@ -1091,6 +776,68 @@ describe("Filter", function() { }) .then(function(content) { assert.equal(content, message); + list = null; + }); + }); + }); + + describe("FilterSource", function() { + var message = "some new fancy filter"; + + 
before(function() { + var test = this; + return fse.readFile(readmePath, "utf8") + .then((function(content) { + test.originalReadmeContent = content; + })); + }); + + afterEach(function() { + this.timeout(15000); + return fse.writeFile(readmePath, this.originalReadmeContent); + }); + + it("a FilterSource has an async repo getter", function() { + var test = this; + + return Registry.register(filterName, { + apply: function(to, from, source) { + return source.repo() + .then(function() { + return NodeGit.Error.CODE.PASSTHROUGH; + }); + }, + check: function(source) { + return source.repo() + .then(function() { + return NodeGit.Error.CODE.OK; + }); + } + }, 0) + .then(function(result) { + assert.strictEqual(result, NodeGit.Error.CODE.OK); + }) + .then(function() { + var readmeContent = fse.readFileSync( + packageJsonPath, + "utf-8" + ); + assert.notStrictEqual(readmeContent, message); + + return fse.writeFile( + packageJsonPath, + "Changing content to trigger checkout" + ); + }) + .then(function() { + var opts = { + checkoutStrategy: Checkout.STRATEGY.FORCE, + paths: "package.json" + }; + return Checkout.head(test.repository, opts); + }) + .then(function() { + garbageCollect(); }); }); }); diff --git a/test/tests/graph.js b/test/tests/graph.js index 34805cda08..87dcfda544 100644 --- a/test/tests/graph.js +++ b/test/tests/graph.js @@ -58,7 +58,21 @@ describe("Graph", function() { "26744fc697849d370246749b67ac43b792a4af0c" ) .catch(function(result) { - assert(~result.message.indexOf("81b06fac")); + assert(~result.message.indexOf("object not found - no match for id")); + }); + }); + + it("can tell if a commit is reachable from any of a list of commits", function() { + return Graph.reachableFromAny( + this.repository, + "32789a79e71fbc9e04d3eff7425e1771eb595150", + [ + "1729c73906bb8467f4095c2f4044083016b4dfde", + "e0aeedcff0584ebe00aed2c03c8ecd10839df908" + ] + ) + .then(function(result) { + assert.equal(result, 0); }); }); }); diff --git a/test/tests/merge.js 
b/test/tests/merge.js index 80bb0589d7..a5010925b2 100644 --- a/test/tests/merge.js +++ b/test/tests/merge.js @@ -646,9 +646,11 @@ describe("Merge", function() { ourSignature, NodeGit.Merge.PREFERENCE.NO_FASTFORWARD, null, - function(message) { - assert(message === "Merge branch 'theirs' into ours"); - return "We manipulated the message, HAH."; + { + processMergeMessageCallback: function(message) { + assert(message === "Merge branch 'theirs' into ours"); + return "We manipulated the message, HAH."; + } } ); }) @@ -803,9 +805,11 @@ describe("Merge", function() { ourSignature, NodeGit.Merge.PREFERENCE.NO_FASTFORWARD, null, - function(message) { - assert(message === "Merge branch 'theirs' into ours"); - return Promise.resolve("We manipulated the message, HAH."); + { + processMergeMessageCallback: function(message) { + assert(message === "Merge branch 'theirs' into ours"); + return Promise.resolve("We manipulated the message, HAH."); + } } ); }) @@ -1588,7 +1592,7 @@ describe("Merge", function() { }) .then(function(commitOid) { assert.equal(commitOid.toString(), - "03ba156a7a1660f179b6b2dbc6a542fcf88d022d"); + "8221726e3f96e3d3e0258f655e107383dc3c7335"); // merge isn't cleaned up automatically assert.ok(fse.existsSync(path.join(repoGitPath, "MERGE_HEAD"))); diff --git a/test/tests/patch.js b/test/tests/patch.js index 158685a08e..fe7fe3734a 100644 --- a/test/tests/patch.js +++ b/test/tests/patch.js @@ -61,4 +61,49 @@ describe("Patch", function() { }); }); + + it("can generate patch from blobs", function() { + // Generates a patch for README.md from commit + // fce88902e66c72b5b93e75bdb5ae717038b221f6 + const file = "README.md"; + + return NodeGit.Blob.lookup( + this.repository, + "b252f396b17661462372f78b7bcfc403b8731aaa" + ).then(blob => { + return NodeGit.Blob.lookup( + this.repository, + "b8d014998072c3f9e4b7eba8486011e80d8de98a" + ).then(oldBlob => { + return NodeGit.Patch.fromBlobs(oldBlob, file, blob, file) + .then(patch => { + assert.strictEqual(patch.size(0, 0, 
0), 254); + }); + }); + }); + }); + + it("can generate patch from blobs without 'old_blob'", function() { + // Generates a patch for README.md from commit + // fce88902e66c72b5b93e75bdb5ae717038b221f6 without + // old_blob. Should show all lines as additions. + const file = "README.md"; + + return NodeGit.Blob.lookup( + this.repository, + "b252f396b17661462372f78b7bcfc403b8731aaa" + ).then(blob => { + return NodeGit.Patch.fromBlobs(null, file, blob, file) + .then(patch => { + assert.strictEqual(patch.size(0, 0, 0), 8905); + }); + }); + }); + + it("can generate patch from blobs without arguments", function() { + return NodeGit.Patch.fromBlobs() + .then(patch => { + assert.strictEqual(patch.size(0, 0, 0), 0); + }); + }); }); diff --git a/test/tests/rebase.js b/test/tests/rebase.js index ae9aab1d1f..d9aae1fbce 100644 --- a/test/tests/rebase.js +++ b/test/tests/rebase.js @@ -3,6 +3,10 @@ var path = require("path"); var local = path.join.bind(path, __dirname); var fse = require("fs-extra"); +var garbageCollect = require("../utils/garbage_collect.js"); + +const isNode8 = process.versions.node.split(".")[0] === "8"; + describe("Rebase", function() { var NodeGit = require("../../"); var Checkout = NodeGit.Checkout; @@ -145,7 +149,7 @@ describe("Rebase", function() { }); }); - it("can cleanly rebase a branch onto another branch", function() { + it("can cleanly rebase a branch in-memory", function() { var baseFileName = "baseNewFile.txt"; var ourFileName = "ourNewFile.txt"; var theirFileName = "theirNewFile.txt"; @@ -262,6 +266,187 @@ describe("Rebase", function() { var ourAnnotatedCommit = annotatedCommits[0]; var theirAnnotatedCommit = annotatedCommits[1]; + assert.equal(ourAnnotatedCommit.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + assert.equal(theirAnnotatedCommit.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + var rebaseOptions = new NodeGit.RebaseOptions(); + rebaseOptions.inmemory = 1; + + return 
NodeGit.Rebase.init(repository, ourAnnotatedCommit, + theirAnnotatedCommit, undefined, rebaseOptions); + }) + .then(function(newRebase) { + rebase = newRebase; + + // there should only be 1 rebase operation to perform + assert.equal(rebase.operationEntrycount(), 1); + + return rebase.next(); + }) + .then(function(rebaseOperation) { + assert.equal(rebaseOperation.type(), + NodeGit.RebaseOperation.REBASE_OPERATION.PICK); + assert.equal(rebaseOperation.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + return rebase.commit(null, ourSignature); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "b937100ee0ea17ef20525306763505a7fe2be29e"); + + // git_rebase_operation_current returns the index of the rebase + // operation that was last applied, so after the first operation, it + // should be 0. + assert.equal(rebase.operationCurrent(), 0); + + return rebase.finish(ourSignature, {}); + }) + .then(function(result) { + assert.equal(result, 0); + + return repository.getBranchCommit(ourBranchName); + }) + .then(function(commit) { + // verify that the "ours" branch has NOT moved. + // In-memory rebase does not touch refs. + assert.equal(commit.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + // Lookup the new commit + return NodeGit.Commit.lookup(repository, + "b937100ee0ea17ef20525306763505a7fe2be29e"); + }) + .then(function(commit) { + // Lookup the parent of our new commit + return commit.parent(0); + }) + .then(function(commit) { + // verify that we are on top of "their commit" + assert.equal(commit.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + }); + }); + + it("can cleanly rebase a branch onto another branch", function() { + var baseFileName = "baseNewFile.txt"; + var ourFileName = "ourNewFile.txt"; + var theirFileName = "theirNewFile.txt"; + + var baseFileContent = "How do you feel about Toll Roads?"; + var ourFileContent = "I like Toll Roads. 
I have an EZ-Pass!"; + var theirFileContent = "I'm skeptical about Toll Roads"; + + var ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + var theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + var repository = this.repository; + var ourCommit; + var ourBranch; + var theirBranch; + var rebase; + + return fse.writeFile(path.join(repository.workdir(), baseFileName), + baseFileContent) + // Load up the repository index and make our initial commit to HEAD + .then(function() { + return RepoUtils.addFileToIndex(repository, baseFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "b5cdc109d437c4541a13fb7509116b5f03d5039a"); + + return repository.createCommit("HEAD", ourSignature, + ourSignature, "initial commit", oid, []); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "be03abdf0353d05924c53bebeb0e5bb129cda44a"); + + return repository.getCommit(commitOid).then(function(commit) { + ourCommit = commit; + }).then(function() { + return repository.createBranch(ourBranchName, commitOid) + .then(function(branch) { + ourBranch = branch; + return repository.createBranch(theirBranchName, commitOid); + }); + }); + }) + .then(function(branch) { + theirBranch = branch; + return fse.writeFile(path.join(repository.workdir(), theirFileName), + theirFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, theirFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "be5f0fd38a39a67135ad68921c93cd5c17fefb3d"); + + return repository.createCommit(theirBranch.name(), theirSignature, + theirSignature, "they made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return removeFileFromIndex(repository, theirFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), 
theirFileName)); + }) + .then(function() { + return fse.writeFile(path.join(repository.workdir(), ourFileName), + ourFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, ourFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "77867fc0bfeb3f80ab18a78c8d53aa3a06207047"); + + return repository.createCommit(ourBranch.name(), ourSignature, + ourSignature, "we made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + return removeFileFromIndex(repository, ourFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), ourFileName)); + }) + .then(function() { + return repository.checkoutBranch(ourBranchName); + }) + .then(function() { + return Promise.all([ + repository.getReference(ourBranchName), + repository.getReference(theirBranchName) + ]); + }) + .then(function(refs) { + assert.equal(refs.length, 2); + + return Promise.all([ + NodeGit.AnnotatedCommit.fromRef(repository, refs[0]), + NodeGit.AnnotatedCommit.fromRef(repository, refs[1]) + ]); + }) + .then(function(annotatedCommits) { + assert.equal(annotatedCommits.length, 2); + + var ourAnnotatedCommit = annotatedCommits[0]; + var theirAnnotatedCommit = annotatedCommits[1]; + assert.equal(ourAnnotatedCommit.id().toString(), "e7f37ee070837052937e24ad8ba66f6d83ae7941"); assert.equal(theirAnnotatedCommit.id().toString(), @@ -1534,4 +1719,825 @@ describe("Rebase", function() { "b3c355bb606ec7da87174dfa1a0b0c0e3dc97bc0"); }); }); + + if (!isNode8) { + it("can sign commits during the rebase", function() { + var baseFileName = "baseNewFile.txt"; + var ourFileName = "ourNewFile.txt"; + var theirFileName = "theirNewFile.txt"; + + var baseFileContent = "How do you feel about Toll Roads?"; + var ourFileContent = "I like Toll Roads. 
I have an EZ-Pass!"; + var theirFileContent = "I'm skeptical about Toll Roads"; + + var ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + var theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + var repository = this.repository; + var ourCommit; + var ourBranch; + var theirBranch; + var rebase; + + return fse.writeFile(path.join(repository.workdir(), baseFileName), + baseFileContent) + // Load up the repository index and make our initial commit to HEAD + .then(function() { + return RepoUtils.addFileToIndex(repository, baseFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "b5cdc109d437c4541a13fb7509116b5f03d5039a"); + + return repository.createCommit("HEAD", ourSignature, + ourSignature, "initial commit", oid, []); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "be03abdf0353d05924c53bebeb0e5bb129cda44a"); + + return repository.getCommit(commitOid).then(function(commit) { + ourCommit = commit; + }).then(function() { + return repository.createBranch(ourBranchName, commitOid) + .then(function(branch) { + ourBranch = branch; + return repository.createBranch(theirBranchName, commitOid); + }); + }); + }) + .then(function(branch) { + theirBranch = branch; + return fse.writeFile(path.join(repository.workdir(), theirFileName), + theirFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, theirFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "be5f0fd38a39a67135ad68921c93cd5c17fefb3d"); + + return repository.createCommit(theirBranch.name(), theirSignature, + theirSignature, "they made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return removeFileFromIndex(repository, theirFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), 
theirFileName)); + }) + .then(function() { + return fse.writeFile(path.join(repository.workdir(), ourFileName), + ourFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, ourFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "77867fc0bfeb3f80ab18a78c8d53aa3a06207047"); + + return repository.createCommit(ourBranch.name(), ourSignature, + ourSignature, "we made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + return removeFileFromIndex(repository, ourFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), ourFileName)); + }) + .then(function() { + return repository.checkoutBranch(ourBranchName); + }) + .then(function() { + return Promise.all([ + repository.getReference(ourBranchName), + repository.getReference(theirBranchName) + ]); + }) + .then(function(refs) { + assert.equal(refs.length, 2); + + return Promise.all([ + NodeGit.AnnotatedCommit.fromRef(repository, refs[0]), + NodeGit.AnnotatedCommit.fromRef(repository, refs[1]) + ]); + }) + .then(function(annotatedCommits) { + assert.equal(annotatedCommits.length, 2); + + var ourAnnotatedCommit = annotatedCommits[0]; + var theirAnnotatedCommit = annotatedCommits[1]; + + assert.equal(ourAnnotatedCommit.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + assert.equal(theirAnnotatedCommit.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return NodeGit.Rebase.init(repository, ourAnnotatedCommit, + theirAnnotatedCommit, null, { + signingCb: (commitContent) => ({ + code: NodeGit.Error.CODE.OK, + field: "moose-sig", + signedData: "A moose was here." 
+ }) + }); + }) + .then(function(newRebase) { + rebase = newRebase; + + // there should only be 1 rebase operation to perform + assert.equal(rebase.operationEntrycount(), 1); + + return rebase.next(); + }) + .then(function(rebaseOperation) { + assert.equal(rebaseOperation.type(), + NodeGit.RebaseOperation.REBASE_OPERATION.PICK); + assert.equal(rebaseOperation.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + // Make sure we don't crash calling the signature CB + // after collecting garbage. + garbageCollect(); + + return rebase.commit(null, ourSignature); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "617cd03370dd799f372e9dcfcd0b097aede1bd7f"); + + // git_rebase_operation_current returns the index of the rebase + // operation that was last applied, so after the first operation, it + // should be 0. + assert.equal(rebase.operationCurrent(), 0); + + return rebase.finish(ourSignature, {}); + }) + .then(function(result) { + assert.equal(result, 0); + + return repository.getBranchCommit(ourBranchName); + }) + .then(function(commit) { + // verify that the "ours" branch has moved to the correct place + assert.equal(commit.id().toString(), + "617cd03370dd799f372e9dcfcd0b097aede1bd7f"); + + return Promise.all([ + commit.parent(0), + NodeGit.Commit.extractSignature( + repository, + "617cd03370dd799f372e9dcfcd0b097aede1bd7f", + "moose-sig" + ) + ]); + }) + .then(function([parent, { signature }]) { + // verify that we are on top of "their commit" + assert.equal(parent.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + assert.equal(signature, "A moose was here."); + }); + }); + + it("can optionally skip signing commits", function() { + var baseFileName = "baseNewFile.txt"; + var ourFileName = "ourNewFile.txt"; + var theirFileName = "theirNewFile.txt"; + + var baseFileContent = "How do you feel about Toll Roads?"; + var ourFileContent = "I like Toll Roads. 
I have an EZ-Pass!"; + var theirFileContent = "I'm skeptical about Toll Roads"; + + var ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + var theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + var repository = this.repository; + var ourCommit; + var ourBranch; + var theirBranch; + var rebase; + + return fse.writeFile(path.join(repository.workdir(), baseFileName), + baseFileContent) + // Load up the repository index and make our initial commit to HEAD + .then(function() { + return RepoUtils.addFileToIndex(repository, baseFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "b5cdc109d437c4541a13fb7509116b5f03d5039a"); + + return repository.createCommit("HEAD", ourSignature, + ourSignature, "initial commit", oid, []); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "be03abdf0353d05924c53bebeb0e5bb129cda44a"); + + return repository.getCommit(commitOid).then(function(commit) { + ourCommit = commit; + }).then(function() { + return repository.createBranch(ourBranchName, commitOid) + .then(function(branch) { + ourBranch = branch; + return repository.createBranch(theirBranchName, commitOid); + }); + }); + }) + .then(function(branch) { + theirBranch = branch; + return fse.writeFile(path.join(repository.workdir(), theirFileName), + theirFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, theirFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "be5f0fd38a39a67135ad68921c93cd5c17fefb3d"); + + return repository.createCommit(theirBranch.name(), theirSignature, + theirSignature, "they made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return removeFileFromIndex(repository, theirFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), 
theirFileName)); + }) + .then(function() { + return fse.writeFile(path.join(repository.workdir(), ourFileName), + ourFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, ourFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "77867fc0bfeb3f80ab18a78c8d53aa3a06207047"); + + return repository.createCommit(ourBranch.name(), ourSignature, + ourSignature, "we made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + return removeFileFromIndex(repository, ourFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), ourFileName)); + }) + .then(function() { + return repository.checkoutBranch(ourBranchName); + }) + .then(function() { + return Promise.all([ + repository.getReference(ourBranchName), + repository.getReference(theirBranchName) + ]); + }) + .then(function(refs) { + assert.equal(refs.length, 2); + + return Promise.all([ + NodeGit.AnnotatedCommit.fromRef(repository, refs[0]), + NodeGit.AnnotatedCommit.fromRef(repository, refs[1]) + ]); + }) + .then(function(annotatedCommits) { + assert.equal(annotatedCommits.length, 2); + + var ourAnnotatedCommit = annotatedCommits[0]; + var theirAnnotatedCommit = annotatedCommits[1]; + + assert.equal(ourAnnotatedCommit.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + assert.equal(theirAnnotatedCommit.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return NodeGit.Rebase.init(repository, ourAnnotatedCommit, + theirAnnotatedCommit, null, { + signingCb: () => ({ + code: NodeGit.Error.CODE.PASSTHROUGH + }) + }); + }) + .then(function(newRebase) { + rebase = newRebase; + + // there should only be 1 rebase operation to perform + assert.equal(rebase.operationEntrycount(), 1); + + return rebase.next(); + }) + .then(function(rebaseOperation) { + assert.equal(rebaseOperation.type(), + 
NodeGit.RebaseOperation.REBASE_OPERATION.PICK); + assert.equal(rebaseOperation.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + // Make sure we don't crash calling the signature CB + // after collecting garbage. + garbageCollect(); + + return rebase.commit(null, ourSignature); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "b937100ee0ea17ef20525306763505a7fe2be29e"); + + // git_rebase_operation_current returns the index of the rebase + // operation that was last applied, so after the first operation, it + // should be 0. + assert.equal(rebase.operationCurrent(), 0); + + return rebase.finish(ourSignature, {}); + }) + .then(function(result) { + assert.equal(result, 0); + + return repository.getBranchCommit(ourBranchName); + }) + .then(function(commit) { + // verify that the "ours" branch has moved to the correct place + assert.equal(commit.id().toString(), + "b937100ee0ea17ef20525306763505a7fe2be29e"); + + return commit.parent(0); + }) + .then(function(parent) { + // verify that we are on top of "their commit" + assert.equal(parent.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + return NodeGit.Commit.extractSignature( + repository, + "b937100ee0ea17ef20525306763505a7fe2be29e", + "moose-sig" + ) + .then(function() { + assert.fail("This commit should not be signed."); + }, function (error) { + if (error && error.message === "this commit is not signed") { + return; + } + throw error; + }); + }); + }); + + it("will throw if commit signing cb returns an error code", function() { + var baseFileName = "baseNewFile.txt"; + var ourFileName = "ourNewFile.txt"; + var theirFileName = "theirNewFile.txt"; + + var baseFileContent = "How do you feel about Toll Roads?"; + var ourFileContent = "I like Toll Roads. 
I have an EZ-Pass!"; + var theirFileContent = "I'm skeptical about Toll Roads"; + + var ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + var theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + var repository = this.repository; + var ourCommit; + var ourBranch; + var theirBranch; + var rebase; + + return fse.writeFile(path.join(repository.workdir(), baseFileName), + baseFileContent) + // Load up the repository index and make our initial commit to HEAD + .then(function() { + return RepoUtils.addFileToIndex(repository, baseFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "b5cdc109d437c4541a13fb7509116b5f03d5039a"); + + return repository.createCommit("HEAD", ourSignature, + ourSignature, "initial commit", oid, []); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "be03abdf0353d05924c53bebeb0e5bb129cda44a"); + + return repository.getCommit(commitOid).then(function(commit) { + ourCommit = commit; + }).then(function() { + return repository.createBranch(ourBranchName, commitOid) + .then(function(branch) { + ourBranch = branch; + return repository.createBranch(theirBranchName, commitOid); + }); + }); + }) + .then(function(branch) { + theirBranch = branch; + return fse.writeFile(path.join(repository.workdir(), theirFileName), + theirFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, theirFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "be5f0fd38a39a67135ad68921c93cd5c17fefb3d"); + + return repository.createCommit(theirBranch.name(), theirSignature, + theirSignature, "they made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return removeFileFromIndex(repository, theirFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), 
theirFileName)); + }) + .then(function() { + return fse.writeFile(path.join(repository.workdir(), ourFileName), + ourFileContent); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, ourFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "77867fc0bfeb3f80ab18a78c8d53aa3a06207047"); + + return repository.createCommit(ourBranch.name(), ourSignature, + ourSignature, "we made a commit", oid, [ourCommit]); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + return removeFileFromIndex(repository, ourFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), ourFileName)); + }) + .then(function() { + return repository.checkoutBranch(ourBranchName); + }) + .then(function() { + return Promise.all([ + repository.getReference(ourBranchName), + repository.getReference(theirBranchName) + ]); + }) + .then(function(refs) { + assert.equal(refs.length, 2); + + return Promise.all([ + NodeGit.AnnotatedCommit.fromRef(repository, refs[0]), + NodeGit.AnnotatedCommit.fromRef(repository, refs[1]) + ]); + }) + .then(function(annotatedCommits) { + assert.equal(annotatedCommits.length, 2); + + var ourAnnotatedCommit = annotatedCommits[0]; + var theirAnnotatedCommit = annotatedCommits[1]; + + assert.equal(ourAnnotatedCommit.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + assert.equal(theirAnnotatedCommit.id().toString(), + "e9ebd92f2f4778baf6fa8e92f0c68642f931a554"); + + return NodeGit.Rebase.init(repository, ourAnnotatedCommit, + theirAnnotatedCommit, null, { + signingCb: () => ({ + code: NodeGit.Error.CODE.ERROR + }) + }); + }) + .then(function(newRebase) { + rebase = newRebase; + + // there should only be 1 rebase operation to perform + assert.equal(rebase.operationEntrycount(), 1); + + return rebase.next(); + }) + .then(function(rebaseOperation) { + assert.equal(rebaseOperation.type(), + 
NodeGit.RebaseOperation.REBASE_OPERATION.PICK); + assert.equal(rebaseOperation.id().toString(), + "e7f37ee070837052937e24ad8ba66f6d83ae7941"); + + // Make sure we don't crash calling the signature CB + // after collecting garbage. + garbageCollect(); + + return rebase.commit(null, ourSignature); + }) + .then(function() { + assert.fail("rebase.commit should have failed"); + }, function(error) { + if (error && error.errno === NodeGit.Error.CODE.ERROR) { + return; + } + throw error; + }); + }); + } + + it("will not throw on patch already applied errors", function() { + var baseFileName = "baseNewFile.txt"; + var theirFileName = "myFile.txt"; + + var baseFileContent = "How do you feel about Toll Roads?"; + var theirFileContent = "Hello there"; + + var ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + var theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + var repository = this.repository; + var initialCommit; + var ourBranch; + var theirBranch; + var rebase; + + return fse.writeFile(path.join(repository.workdir(), baseFileName), + baseFileContent) + // Load up the repository index and make our initial commit to HEAD + .then(function() { + return RepoUtils.addFileToIndex(repository, baseFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "b5cdc109d437c4541a13fb7509116b5f03d5039a"); + + return repository.createCommit("HEAD", ourSignature, + ourSignature, "initial commit", oid, []); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "be03abdf0353d05924c53bebeb0e5bb129cda44a"); + + return repository.getCommit(commitOid).then(function(commit) { + initialCommit = commit; + }).then(function() { + return repository.createBranch(ourBranchName, commitOid) + .then(function(branch) { + ourBranch = branch; + return repository.createBranch(theirBranchName, commitOid); + }); + }); + }) + .then(function(branch) { + theirBranch = 
branch; + return fse.writeFile( + path.join(repository.workdir(), theirFileName), + theirFileContent + ); + }) + .then(function() { + return RepoUtils.addFileToIndex(repository, theirFileName); + }) + .then(function(oid) { + assert.equal(oid.toString(), + "6f14d06b24fa8ea26f511dd8a94a003fd37eadc5"); + + return repository.createCommit(theirBranch.name(), theirSignature, + theirSignature, "they made a commit", oid, [initialCommit]) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "c4cc225184b9c9682cb48294358d9d65f8ec42c7"); + return repository.createCommit(ourBranch.name(), ourSignature, + ourSignature, "we made a commit", oid, [initialCommit]); + }); + }) + .then(function(commitOid) { + assert.equal(commitOid.toString(), + "5814ffa17b8a677191d89d5372f1e46d50d976ae"); + + return removeFileFromIndex(repository, theirFileName); + }) + .then(function() { + return fse.remove(path.join(repository.workdir(), theirFileName)); + }) + .then(function() { + return repository.checkoutBranch(ourBranchName); + }) + .then(function() { + return Promise.all([ + repository.getReference(ourBranchName), + repository.getReference(theirBranchName) + ]); + }) + .then(function(refs) { + assert.equal(refs.length, 2); + + return Promise.all([ + NodeGit.AnnotatedCommit.fromRef(repository, refs[0]), + NodeGit.AnnotatedCommit.fromRef(repository, refs[1]) + ]); + }) + .then(function(annotatedCommits) { + assert.equal(annotatedCommits.length, 2); + + var ourAnnotatedCommit = annotatedCommits[0]; + var theirAnnotatedCommit = annotatedCommits[1]; + + assert.equal(ourAnnotatedCommit.id().toString(), + "5814ffa17b8a677191d89d5372f1e46d50d976ae"); + assert.equal(theirAnnotatedCommit.id().toString(), + "c4cc225184b9c9682cb48294358d9d65f8ec42c7"); + + return NodeGit.Rebase.init( + repository, + ourAnnotatedCommit, + theirAnnotatedCommit + ); + }) + .then(function(newRebase) { + rebase = newRebase; + + // there should only be 1 rebase operation to perform + 
assert.equal(rebase.operationEntrycount(), 1); + + return rebase.next(); + }) + .catch(function(error) { + assert.fail(error); + + throw error; + }) + .then(function(rebaseOperation) { + assert.equal(rebaseOperation.type(), + NodeGit.RebaseOperation.REBASE_OPERATION.PICK); + assert.equal(rebaseOperation.id().toString(), + "5814ffa17b8a677191d89d5372f1e46d50d976ae"); + + return rebase.commit(null, ourSignature); + }) + .then(function() { + assert.fail("Rebase should have failed."); + }, function (error) { + if (error && error.errno === NodeGit.Error.CODE.EAPPLIED) { + return; + } + + assert.fail(error); + + throw error; + }) + .then(function() { + return repository.continueRebase(); + }) + .then(function() { + return rebase.next(); + }) + .catch(function(error) { + assert.equal(error.errno, NodeGit.Error.CODE.ITEROVER); + }); + }); + + + it("rebase signs correctly if rebaseOptions are re-used", function () { + const ourFileName = "ourNewFile.txt"; + const theirFileName = "theirNewFile.txt"; + + const ourFileContent = "I like Toll Roads. I have an EZ-Pass!"; + const theirFileContent = "I'm skeptical about Toll Roads"; + + const ourSignature = NodeGit.Signature.create + ("Ron Paul", "RonPaul@TollRoadsRBest.info", 123456789, 60); + const theirSignature = NodeGit.Signature.create + ("Greg Abbott", "Gregggg@IllTollYourFace.us", 123456789, 60); + + let ourCommit; + let theirCommit; + + let rebase; + let newCommitOid; + + const rebaseOptions = { + signingCb: () => ({ + code: NodeGit.Error.CODE.OK, + field: "moose-sig", + signedData: "A moose was here." 
+ }) + }; + + const repository = this.repository; + + // Create two commits on master + // one + return fse.writeFile(path.join(repository.workdir(), ourFileName), + ourFileContent) + .then(() => RepoUtils.addFileToIndex(repository, ourFileName)) + .then((oid) => { + assert.equal(oid.toString(), + "11ead82b1135b8e240fb5d61e703312fb9cc3d6a"); + return repository.createCommit("HEAD", ourSignature, ourSignature, + "we made a commit", oid, []); + }) + .then((commitOid) => { + assert.equal(commitOid.toString(), + "91a183f87842ebb7a9b08dad8bc2473985796844"); + return repository.getCommit(commitOid); + }) + .then((_ourCommit) => { + ourCommit = _ourCommit; + return fse.writeFile(path.join(repository.workdir(), theirFileName), + theirFileContent); + }) + .then(() => RepoUtils.addFileToIndex(repository, theirFileName)) + .then((oid) => { + assert.equal(oid.toString(), + "76631cb5a290dafe2959152626bb90f2a6d8ec94"); + return repository.createCommit("HEAD", theirSignature, + theirSignature, "they made a commit", oid, [ourCommit]); + }) + .then((commitOid) => { + assert.equal(commitOid.toString(), + "0e9231d489b3f4303635fc4b0397830da095e7e7"); + return repository.getCommit(commitOid); + + }) + .then((_theirCommit) => { + theirCommit = _theirCommit; + return Promise.all([ + NodeGit.AnnotatedCommit.lookup( + repository, + ourCommit.id() + ), + NodeGit.AnnotatedCommit.lookup( + repository, + theirCommit.id() + ) + ]); + }) + // rebase latest commit + .then(([ourAnnotatedCommit, theirAnnotatedCommit]) => + NodeGit.Rebase.init( + repository, + // branch, upstream, onto + theirAnnotatedCommit, ourAnnotatedCommit, null, + rebaseOptions // use once + )) + .then(() => { + return NodeGit.Rebase.open( + repository, + rebaseOptions // use twice + ); + }) + .then((_rebase) => { + rebase = _rebase; + return rebase.next(); + }) + .then(() => { + const operationCurrentIndex = rebase.operationCurrent(); + assert(operationCurrentIndex === 0); + // Make sure we don't crash calling the signature 
CB + // after collecting garbage. + garbageCollect(); + return rebase.commit(null, ourSignature); + }) + .then((_newCommitOid) => { + newCommitOid = _newCommitOid; + assert.strictEqual(newCommitOid.toString(), + "9909e435b52322a71dc341d747b29c392a34c745"); + return rebase.next(); + }) + .then(() => { + assert.fail("should throw"); + }) + .catch((error) => { + assert(error.errno === NodeGit.Error.CODE.ITEROVER); + assert.strictEqual(rebase.finish(ourSignature), 0); + return NodeGit.Commit.extractSignature( + repository, + newCommitOid.toString(), + "moose-sig" + ); + }) + .then((sig) => { + assert.strictEqual(sig.signature, "A moose was here."); + }); +}); }); diff --git a/test/tests/refs.js b/test/tests/refs.js index 68b114e85f..1cad9f4084 100644 --- a/test/tests/refs.js +++ b/test/tests/refs.js @@ -109,7 +109,7 @@ describe("Reference", function() { }) .then(function(reflog) { var refEntryMessage = reflog - .entryByIndex(reflog.entrycount() - 1) + .entryByIndex(0) .message(); // The reflog should have the message passed to // the rename diff --git a/test/tests/remote.js b/test/tests/remote.js index ff6004c666..c9bf8192c7 100644 --- a/test/tests/remote.js +++ b/test/tests/remote.js @@ -7,6 +7,8 @@ var fp = require("lodash/fp"); var garbageCollect = require("../utils/garbage_collect.js"); var RepoUtils = require("../utils/repository_setup"); +const isNode8 = process.versions.node.split(".")[0] === "8"; + describe("Remote", function() { var NodeGit = require("../../"); var Repository = NodeGit.Repository; @@ -19,7 +21,7 @@ describe("Remote", function() { var privateUrl = "git@github.com:nodegit/private"; function removeNonOrigins(repo) { - return repo.getRemotes() + return repo.getRemoteNames() .then(function(remotes) { return remotes.reduce(function(promise, remote) { if (remote !== "origin") { @@ -99,6 +101,31 @@ describe("Remote", function() { }); }); + it("can rename a remote", function() { + var repository = this.repository; + + return Remote.list(repository) + 
.then(function(remoteNames) { + assert.deepEqual(remoteNames, ["origin"]); + return Remote.rename(repository, "origin", "origin2"); + }) + .then(function(problems) { + assert.deepEqual(problems, []); + return Remote.list(repository); + }) + .then(function(remoteNames) { + assert.deepEqual(remoteNames, ["origin2"]); + return Remote.rename(repository, "origin2", "origin"); + }) + .then(function(problems) { + assert.deepEqual(problems, []); + return Remote.list(repository); + }) + .then(function(remoteNames) { + assert.deepEqual(remoteNames, ["origin"]); + }); + }); + it("can delete a remote", function() { var repository = this.repository; @@ -120,9 +147,7 @@ describe("Remote", function() { return repo.getRemote("origin") .then(function(remote) { remoteCallbacks = { - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 }; return remote.connect(NodeGit.Enums.DIRECTION.FETCH, remoteCallbacks) @@ -199,11 +224,9 @@ describe("Remote", function() { var fetchOpts = { callbacks: { credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); - }, - certificateCheck: function() { - return 1; + return NodeGit.Credential.sshKeyFromAgent(userName); }, + certificateCheck: () => 0, transferProgress: function() { wasCalled = true; @@ -222,9 +245,7 @@ describe("Remote", function() { it("can get the default branch of a remote", function() { var remoteCallbacks = { - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 }; var remote = this.remote; @@ -240,11 +261,9 @@ describe("Remote", function() { return this.repository.fetch("origin", { callbacks: { credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } }); }); @@ -254,16 +273,14 @@ describe("Remote", function() { var fetchOptions = { callbacks: { credentials: function(url, userName) { - return 
NodeGit.Cred.sshKeyNew( + return NodeGit.Credential.sshKeyNew( userName, path.resolve("./test/nodegit-test-rsa.pub"), path.resolve("./test/nodegit-test-rsa"), "" ); }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } }; @@ -285,12 +302,10 @@ describe("Remote", function() { credentials: function(url, userName) { if (firstPass) { firstPass = false; - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); } }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } }; @@ -321,20 +336,19 @@ describe("Remote", function() { return repository.fetchAll({ callbacks: { credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } }); }); }); - it("will reject if credentials promise rejects", function() { - var repo = this.repository; - var branch = "should-not-exist"; - return Remote.lookup(repo, "origin") + if (!isNode8) { + it("will reject if credentials promise rejects", function() { + var repo = this.repository; + var branch = "should-not-exist"; + return Remote.lookup(repo, "origin") .then(function(remote) { var ref = "refs/heads/" + branch; var refs = [ref + ":" + ref]; @@ -342,17 +356,15 @@ describe("Remote", function() { callbacks: { credentials: function(url, userName) { var test = Promise.resolve("test") - .then(function() { return; }) - .then(function() { return; }) - .then(function() { return; }) - .then(function() { - return Promise.reject(new Error("failure case")); - }); + .then(function() { return; }) + .then(function() { return; }) + .then(function() { return; }) + .then(function() { + return Promise.reject(new Error("failure case")); + }); return test; }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 } }; return remote.push(refs, options); @@ -360,105 +372,107 @@ 
describe("Remote", function() { .then(function() { return Promise.reject( new Error("should not be able to push to the repository")); - }, function(err) { - if (err.message === "failure case") - { - return Promise.resolve(); - } else { - throw err; - } - }) - .then(function() { - return Remote.lookup(repo, "origin"); - }) - .then(function(remote) { - var ref = "refs/heads/" + branch; - var refs = [ref + ":" + ref]; - var options = { - callbacks: { - credentials: function(url, userName) { - var test = Promise.resolve() + }, function(err) { + if (err.message === "failure case") + { + return Promise.resolve(); + } else { + throw err; + } + }) + .then(function() { + return Remote.lookup(repo, "origin"); + }) + .then(function(remote) { + var ref = "refs/heads/" + branch; + var refs = [ref + ":" + ref]; + var options = { + callbacks: { + credentials: function(url, userName) { + var test = Promise.resolve() .then(Promise.resolve.bind(Promise)) .then(Promise.resolve.bind(Promise)) .then(Promise.resolve.bind(Promise)) .then(Promise.reject.bind(Promise)); - return test; - }, - certificateCheck: function() { - return 1; + return test; + }, + certificateCheck: () => 0 } - } - }; - return remote.push(refs, options); - }) - .then(function() { - return Promise.reject( - new Error("should not be able to push to the repository")); - }, function(err) { - if (err.message === "Method push has thrown an error.") - { - return Promise.resolve(); - } else { - throw err; - } - }); - }); + }; + return remote.push(refs, options); + }) + .then(function() { + return Promise.reject( + new Error("should not be able to push to the repository")); + }, function(err) { + if (err.message === "Method push has thrown an error.") + { + return Promise.resolve(); + } else { + throw err; + } + }); + }); - it("cannot push to a repository with invalid credentials", function() { - var repo = this.repository; - var branch = "should-not-exist"; - return Remote.lookup(repo, "origin") - .then(function(remote) { - 
var ref = "refs/heads/" + branch; - var refs = [ref + ":" + ref]; - var firstPass = true; - var options = { - callbacks: { - credentials: function(url, userName) { - if (firstPass) { - firstPass = false; - if (url.indexOf("https") === -1) { - return NodeGit.Cred.sshKeyFromAgent(userName); - } else { - return NodeGit.Cred.userpassPlaintextNew(userName, ""); - } + it("cannot push to a repository with invalid credentials", function() { + var repo = this.repository; + var branch = "should-not-exist"; + return Remote.lookup(repo, "origin") + .then(function(remote) { + var ref = "refs/heads/" + branch; + var refs = [ref + ":" + ref]; + var firstPass = true; + var options = { + callbacks: { + credentials: function(url, userName) { + if (firstPass) { + firstPass = false; + if (url.indexOf("https") === -1) { + return NodeGit.Credential.sshKeyFromAgent(userName); + } else { + return NodeGit.Credential + .userpassPlaintextNew(userName, ""); + } + } else { + return Promise.reject(); + } + }, + certificateCheck: () => 0 + } + }; + return remote.push(refs, options); + }) + // takes care of windows bug, see the .catch for the proper pathway + // that this flow should take (cred cb doesn't run twice -> + // throws error) + .then(function() { + return Promise.reject( + new Error("should not be able to push to the repository")); + }, function(err) { + if (err.message.indexOf(401) === -1) { + throw err; } else { - return Promise.reject(); + return Promise.resolve(); } - }, - certificateCheck: function() { - return 1; - } - } - }; - return remote.push(refs, options); - }) - // takes care of windows bug, see the .catch for the proper pathway - // that this flow should take (cred cb doesn't run twice -> throws error) - .then(function() { - return Promise.reject( - new Error("should not be able to push to the repository")); - }, function(err) { - if (err.message.indexOf(401) === -1) { - throw err; - } else { - return Promise.resolve(); - } - }) - // catches linux / osx failure to use 
anonymous credentials - // stops callback infinite loop - .catch(function (reason) { - const messageWithoutNewlines = reason.message.replace(/\n|\r/g, ""); - const validErrors = [ - "Method push has thrown an error.", - "failed to set credentials: The parameter is incorrect." - ]; - assert.ok( - _.includes(validErrors, messageWithoutNewlines), - "Unexpected error: " + reason.message - ); - }); - }); + }) + // catches linux / osx failure to use anonymous credentials + // stops callback infinite loop + .catch(function (reason) { + const messageWithoutNewlines = reason.message.replace( + /\n|\r/g, + "" + ); + const validErrors = [ + "Method push has thrown an error.", + "failed to set credentials: The parameter is incorrect." + ]; + assert.ok( + _.includes(validErrors, messageWithoutNewlines), + "Unexpected error: " + reason.message + ); + }); + }); + } it("is kept alive by refspec", function() { var repo = this.repository; diff --git a/test/tests/repository.js b/test/tests/repository.js index e960629ac9..29f2075099 100644 --- a/test/tests/repository.js +++ b/test/tests/repository.js @@ -11,6 +11,7 @@ describe("Repository", function() { var Index = NodeGit.Index; var Signature = NodeGit.Signature; + var constReposPath = local("../repos/constworkdir"); var reposPath = local("../repos/workdir"); var newRepoPath = local("../repos/newrepo"); var emptyRepoPath = local("../repos/empty"); @@ -18,7 +19,13 @@ describe("Repository", function() { beforeEach(function() { var test = this; - return Repository.open(reposPath) + return Repository.open(constReposPath) + .then(function(constRepository) { + test.constRepository = constRepository; + }) + .then(function() { + return Repository.open(reposPath); + }) .then(function(repository) { test.repository = repository; }) @@ -95,7 +102,7 @@ describe("Repository", function() { }); it("can list remotes", function() { - return this.repository.getRemotes() + return this.repository.getRemoteNames() .then(function(remotes) { 
assert.equal(remotes.length, 1); assert.equal(remotes[0], "origin"); @@ -120,9 +127,10 @@ describe("Repository", function() { }); it("can get the default signature", function() { - var sig = this.repository.defaultSignature(); - - assert(sig instanceof Signature); + this.repository.defaultSignature() + .then((sig) => { + assert(sig instanceof Signature); + }); }); it("gets statuses with StatusFile", function() { @@ -183,11 +191,9 @@ describe("Repository", function() { return repo.fetch("origin", { credentials: function(url, userName) { - return NodeGit.Cred.sshKeyFromAgent(userName); + return NodeGit.Credential.sshKeyFromAgent(userName); }, - certificateCheck: function() { - return 1; - } + certificateCheck: () => 0 }) .then(function() { return repo.fetchheadForeach(function(refname, remoteUrl, oid, isMerge) { @@ -209,13 +215,23 @@ describe("Repository", function() { }); }); - it("can discover if a path is part of a repository", function() { + function discover(ceiling) { var testPath = path.join(reposPath, "lib", "util", "normalize_oid.js"); var expectedPath = path.join(reposPath, ".git"); - return NodeGit.Repository.discover(testPath, 0, "") + return NodeGit.Repository.discover(testPath, 0, ceiling) .then(function(foundPath) { assert.equal(expectedPath, foundPath); }); + } + + it("can discover if a path is part of a repository, null ceiling", + function() { + return discover(null); + }); + + it("can discover if a path is part of a repository, empty ceiling", + function() { + return discover(""); }); it("can create a repo using initExt", function() { @@ -265,16 +281,21 @@ describe("Repository", function() { }); it("can commit on head on a empty repo with createCommitOnHead", function() { - var fileName = "my-new-file-that-shouldnt-exist.file"; - var fileContent = "new file from repository test"; - var repo = this.emptyRepo; - var filePath = path.join(repo.workdir(), fileName); - var authSig = repo.defaultSignature(); - var commitSig = repo.defaultSignature(); - 
var commitMsg = "Doug this has been commited"; - - return fse.writeFile(filePath, fileContent) - .then(function() { + const fileName = "my-new-file-that-shouldnt-exist.file"; + const fileContent = "new file from repository test"; + const repo = this.emptyRepo; + const filePath = path.join(repo.workdir(), fileName); + const commitMsg = "Doug this has been commited"; + let authSig; + let commitSig; + + return repo.defaultSignature() + .then((sig) => { + authSig = sig; + commitSig = sig; + return fse.writeFile(filePath, fileContent); + }) + .then(() => { return repo.createCommitOnHead( [fileName], authSig, @@ -282,7 +303,7 @@ describe("Repository", function() { commitMsg ); }) - .then(function(oidResult) { + .then((oidResult) => { return repo.getHeadCommit() .then(function(commit) { assert.equal( @@ -336,4 +357,38 @@ describe("Repository", function() { assert.equal(numMergeHeads, 1); }); }); + + it("can obtain statistics from a valid constant repository", function() { + return this.constRepository.statistics() + .then(function(analysisReport) { + + assert.equal(analysisReport.repositorySize.commits.count, 992); + assert.equal(analysisReport.repositorySize.commits.size, 265544); + assert.equal(analysisReport.repositorySize.trees.count, 2416); + assert.equal(analysisReport.repositorySize.trees.size, 1188325); + assert.equal(analysisReport.repositorySize.trees.entries, 32571); + assert.equal(analysisReport.repositorySize.blobs.count, 4149); + assert.equal(analysisReport.repositorySize.blobs.size, 48489622); + assert.equal(analysisReport.repositorySize.annotatedTags.count, 1); + assert.equal(analysisReport.repositorySize.references.count, 8); + + assert.equal(analysisReport.biggestObjects.commits.maxSize, 956); + assert.equal(analysisReport.biggestObjects.commits.maxParents, 2); + assert.equal(analysisReport.biggestObjects.trees.maxEntries, 93); + assert.equal(analysisReport.biggestObjects.blobs.maxSize, 1077756); + + assert.equal(analysisReport.historyStructure.maxDepth, 
931); + assert.equal(analysisReport.historyStructure.maxTagDepth, 1); + + assert.equal(analysisReport.biggestCheckouts.numDirectories, 128); + assert.equal(analysisReport.biggestCheckouts.maxPathDepth, 10); + assert.equal(analysisReport.biggestCheckouts.maxPathLength, 107); + assert.equal(analysisReport.biggestCheckouts.numFiles, 514); + assert.equal(analysisReport.biggestCheckouts.totalFileSize, 5160886); + assert.equal(analysisReport.biggestCheckouts.numSymlinks, 2); + assert.equal(analysisReport.biggestCheckouts.numSubmodules, 4); + + // console.log(JSON.stringify(analysisReport,null,2)); + }); + }); }); diff --git a/test/tests/reset.js b/test/tests/reset.js index 47cd40779b..214bf08fd1 100644 --- a/test/tests/reset.js +++ b/test/tests/reset.js @@ -269,4 +269,43 @@ describe("Reset", function() { return Reset.reset(test.repo, test.currentCommit, Reset.TYPE.HARD); }); }); + + it("reset fails if parameter is not a Commit object", function() { + var test = this; + var commit = test.repo.getReferenceCommit("master"); + try { + Reset.reset(test.repo, commit, Reset.TYPE.HARD); + assert.fail( + "Should not be able to pass a Promise (Commit) into the function" + ); + } catch (err) { + // ok + assert.equal( + "Repository and target commit's repository does not match", + err.message + ); + } + }); + + it("reset fails if originating repository is not the same", function() { + var test = this; + var testCommit = null; + return test.repo.getReferenceCommit("master") + .then(function(commit) { + testCommit = commit; + return Repository.open(reposPath); + }) + .then(function(repo) { + return Reset.reset(repo, testCommit, Reset.TYPE.HARD); + }) + .then(function() { + assert.fail("Different source repository instance should fail"); + }) + .catch(function(err) { + assert.equal( + "Repository and target commit's repository does not match", + err.message + ); + }); + }); }); diff --git a/test/tests/revert.js b/test/tests/revert.js index 8733c870fd..accb529cae 100644 --- 
a/test/tests/revert.js +++ b/test/tests/revert.js @@ -10,6 +10,7 @@ describe("Revert", function() { var Revert = NodeGit.Revert; var RevertOptions = NodeGit.RevertOptions; + var Status = NodeGit.Status; var test; var fileName = "foobar.js"; @@ -37,28 +38,27 @@ describe("Revert", function() { var fileStats = fs.statSync(path.join(repoPath, fileName)); assert.ok(fileStats.isFile()); - Revert.revert(test.repository, test.firstCommit, new RevertOptions()) + return Revert.revert(test.repository, test.firstCommit, new RevertOptions()) .then(function() { try { fs.statSync(path.join(repoPath, fileName)); - assert.fail("Working directory was not reverted"); - } - catch (error) { - // pass + } catch (e) { + // we expect this not to exist + return; } + + assert.fail("Working directory was not reverted"); }); }); it("revert modifies the index", function() { - Revert.revert(test.repository, test.firstCommit, new RevertOptions()) - .then(function() { - return test.repository.index(); - }) - .then(function(index) { - var entries = index.entries; - assert.equal(1, entries.length); - assert.ok(_.endsWith(fileName, entries[0].path)); - }); + return Revert.revert(test.repository, test.firstCommit, new RevertOptions()) + .then(() => test.repository.getStatus()) + .then((status) => { + assert.equal(1, status.length); + assert.ok(_.endsWith(fileName, status[0].path())); + assert.equal(Status.STATUS.INDEX_DELETED, status[0].statusBit()); + }); }); it("RevertOptions is optional (unspecified)", function() { @@ -74,4 +74,11 @@ describe("Revert", function() { throw error; }); }); + + it("RevertOptions without MergeOptions should not segfault", function() { + return Revert.revert(test.repository, test.firstCommit, {}) + .catch(function(error) { + throw error; + }); + }); }); diff --git a/test/tests/revwalk.js b/test/tests/revwalk.js index 6c58651939..bb1cd06ee5 100644 --- a/test/tests/revwalk.js +++ b/test/tests/revwalk.js @@ -234,6 +234,7 @@ describe("Revwalk", function() { }); magicShas = [ 
+ "be6905d459f1b236e44b2445df25aff1783993e9", "4a34168b80fe706f52417106821c9cbfec630e47", "f80e085e3118bbd6aad49dad7c53bdc37088bf9b", "694b2d703a02501f288269bea7d1a5d643a83cc8", @@ -344,7 +345,9 @@ describe("Revwalk", function() { var test = this; return leakTest(NodeGit.Revwalk, function() { - return Promise.resolve(NodeGit.Revwalk.create(test.repository)); + const walker = test.repository.createRevWalk(); + walker.push("115d114e2c4d5028c7a78428f16a4528c51be7dd"); + return walker.next(); }); }); diff --git a/test/tests/signature.js b/test/tests/signature.js index bf5f99ef70..e0387c3d3b 100644 --- a/test/tests/signature.js +++ b/test/tests/signature.js @@ -45,37 +45,39 @@ describe("Signature", function() { var savedUserName; var savedUserEmail; - var cleanUp = function() { + var cleanUp = () => { return exec("git config --global user.name \"" + savedUserName + "\"") - .then(function() { + .then(() => { exec("git config --global user.email \"" + savedUserEmail + "\""); }); }; return exec("git config --global user.name") - .then(function(userName) { + .then((userName) => { savedUserName = userName.trim(); return exec("git config --global user.email"); }) - .then(function(userEmail) { + .then((userEmail) => { savedUserEmail = userEmail.trim(); return exec("git config --global --unset user.name"); }) - .then(function() { + .then(() => { return exec("git config --global --unset user.email"); }) - .then(function() { + .then(() => { return Repository.open(reposPath); }) - .then(function(repo) { - var sig = repo.defaultSignature(); + .then((repo) => { + return repo.defaultSignature(); + }) + .then((sig) => { assert.equal(sig.name(), "unknown"); assert.equal(sig.email(), "unknown@example.com"); }) .then(cleanUp) - .catch(function(e) { + .catch((e) => { return cleanUp() .then(function() { return Promise.reject(e); @@ -104,4 +106,29 @@ describe("Signature", function() { // the self-freeing time should get freed assert.equal(startSelfFreeingCount, endSelfFreeingCount); }); + + 
it("toString does not provide a timestamp by default", function () { + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + + assert.equal(signature.toString(), "Shaggy Rogers "); + }); + + it("toString provides the correct timestamp when requested", function() { + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + + assert.equal( + signature.toString(true), + "Shaggy Rogers 987654321 +0130" + ); + }); }); diff --git a/test/tests/stash.js b/test/tests/stash.js index 5a65a9a167..811a0c5091 100644 --- a/test/tests/stash.js +++ b/test/tests/stash.js @@ -31,22 +31,23 @@ describe("Stash", function() { }); function saveDropStash(repo, stashMessage) { - var fileName = "README.md"; - var fileContent = "Cha-cha-cha-chaaaaaangessssss"; - var filePath = path.join(repo.workdir(), fileName); - var oldContent; - var stashes = []; - var stashOid; + const fileName = "README.md"; + const fileContent = "Cha-cha-cha-chaaaaaangessssss"; + const filePath = path.join(repo.workdir(), fileName); + let oldContent; + let stashes = []; + let stashOid; return fse.readFile(filePath) - .then(function(content) { + .then((content) => { oldContent = content; return fse.writeFile(filePath, fileContent); }) - .then(function() { - return Stash.save(repo, repo.defaultSignature(), stashMessage, 0); + .then(() => repo.defaultSignature()) + .then((signature) => { + return Stash.save(repo, signature, stashMessage, 0); }) - .then(function(oid) { + .then((oid) => { stashOid = oid; var stashCb = function(index, message, oid) { stashes.push({index: index, message: message, oid: oid}); @@ -54,114 +55,119 @@ describe("Stash", function() { return Stash.foreach(repo, stashCb); }) - .then(function() { + .then(() => { assert.equal(stashes.length, 1); assert.equal(stashes[0].index, 0); - assert.equal(stashes[0].message, "On master: " + stashMessage); + const expectedMessage = !stashMessage ? 
+ "WIP on master: 32789a7 Fixes EJS not being installed via NPM" : + "On master: " + stashMessage; + assert.equal(stashes[0].message, expectedMessage); assert.equal(stashes[0].oid.toString(), stashOid.toString()); return Stash.drop(repo, 0); }) - .then(function () { + .then(() => { stashes = []; - var stashCb = function(index, message, oid) { + var stashCb = (index, message, oid) => { stashes.push({index: index, message: message, oid: oid}); }; return Stash.foreach(repo, stashCb); }) - .then(function() { + .then(() => { assert.equal(stashes.length, 0); }) - .catch(function(e) { + .catch((e) => { return fse.writeFile(filePath, oldContent) - .then(function() { + .then(() => { return Promise.reject(e); }); }); } it("can save and drop a stash", function() { - saveDropStash(this.repository, "stash test"); + return saveDropStash(this.repository, "stash test"); }); it("can save a stash with no message and drop it", function() { - saveDropStash(this.repository, null); + return saveDropStash(this.repository, null); }); it("can save and pop a stash", function() { - var fileNameA = "README.md"; - var fileNameB = "install.js"; - var oldContentA; - var oldContentB; - var fileContent = "Cha-cha-cha-chaaaaaangessssss"; - var repo = this.repository; - var filePathA = path.join(repo.workdir(), fileNameA); - var filePathB = path.join(repo.workdir(), fileNameB); - var stashMessage = "stash test"; + const fileNameA = "README.md"; + const fileNameB = "install.js"; + let oldContentA; + let oldContentB; + const fileContent = "Cha-cha-cha-chaaaaaangessssss"; + const repo = this.repository; + const filePathA = path.join(repo.workdir(), fileNameA); + const filePathB = path.join(repo.workdir(), fileNameB); + const stashMessage = "stash test"; return fse.readFile(filePathA, "utf-8") - .then(function(content) { + .then((content) => { oldContentA = content; return fse.writeFile(filePathA, fileContent); }) - .then(function() { + .then(() => { return fse.readFile(filePathB, "utf-8"); }) - 
.then(function(content) { + .then((content) => { oldContentB = content; return fse.writeFile(filePathB, fileContent); }) - .then(function() { - return Stash.save(repo, repo.defaultSignature(), stashMessage, 0); + .then(() => repo.defaultSignature()) + .then((signature) => { + return Stash.save(repo, signature, stashMessage, 0); }) - .then(function() { + .then(() => { return fse.readFile(filePathA, "utf-8"); }) - .then(function(content) { + .then((content) => { assert.equal(oldContentA, content); return fse.readFile(filePathB, "utf-8"); }) - .then(function(content) { + .then((content) => { assert.equal(oldContentB, content); return Stash.pop(repo, 0); }) - .then(function() { + .then(() => { return fse.readFile(filePathA, "utf-8"); }) - .then(function(content) { + .then((content) => { assert.equal(fileContent, content); return fse.readFile(filePathB, "utf-8"); }) - .then(function(content) { + .then((content) => { assert.equal(fileContent, content); }); }); it("can save a stash, change files, and fail to pop stash", function() { - var fileName = "README.md"; - var fileContent = "Cha-cha-cha-chaaaaaangessssss"; - var fileContent2 = "Somewhere over the repo, changes were made."; - var repo = this.repository; - var filePath = path.join(repo.workdir(), fileName); - var oldContent; - var stashMessage = "stash test"; + const fileName = "README.md"; + const fileContent = "Cha-cha-cha-chaaaaaangessssss"; + const fileContent2 = "Somewhere over the repo, changes were made."; + const repo = this.repository; + const filePath = path.join(repo.workdir(), fileName); + let oldContent; + const stashMessage = "stash test"; return fse.readFile(filePath) - .then(function(content) { + .then((content) => { oldContent = content; return fse.writeFile(filePath, fileContent); }) - .then(function() { - return Stash.save(repo, repo.defaultSignature(), stashMessage, 0); + .then(() => repo.defaultSignature()) + .then((signature) => { + return Stash.save(repo, signature, stashMessage, 0); }) - 
.then(function() { + .then(() => { return fse.writeFile(filePath, fileContent2); }) - .then(function() { + .then(() => { return Stash.pop(repo, 0); }) - .catch(function(reason) { + .catch((reason) => { if (reason.message !== "1 conflict prevents checkout") { throw reason; } else { @@ -171,81 +177,154 @@ describe("Stash", function() { }); it("can save, apply, then drop the stash", function() { - var fileName = "README.md"; - var fileContent = "Cha-cha-cha-chaaaaaangessssss"; - var repo = this.repository; - var filePath = path.join(repo.workdir(), fileName); - var oldContent; - var stashMessage = "stash test"; + const fileName = "README.md"; + const fileContent = "Cha-cha-cha-chaaaaaangessssss"; + const repo = this.repository; + const filePath = path.join(repo.workdir(), fileName); + let oldContent; + const stashMessage = "stash test"; return fse.readFile(filePath) - .then(function(content) { + .then((content) => { oldContent = content; return fse.writeFile(filePath, fileContent); }) - .then(function() { - return Stash.save(repo, repo.defaultSignature(), stashMessage, 0); + .then(() => repo.defaultSignature()) + .then((signature) => { + return Stash.save(repo, signature, stashMessage, 0); }) - .then(function() { + .then(() => { return Stash.apply(repo, 0); }) - .then(function() { + .then(() => { return Stash.drop(repo, 0); - }, function() { + }, () => { throw new Error("Unable to drop stash after apply."); }) - .then(function() { + .then(() => { return Stash.drop(repo, 0); }) - .catch(function(reason) { - if (reason.message !== "Reference 'refs/stash' not found") { - Promise.reject(); + .catch((reason) => { + if (reason.message !== "reference 'refs/stash' not found") { + throw reason; } }); }); it("can save multiple stashes and pop an arbitrary stash", function() { - var fileName = "README.md"; - var fileContentA = "Hi. It's me. I'm the dog. My name is the dog."; - var fileContentB = "Everyone likes me. 
I'm cute."; - var fileContentC = "I think I will bark at nothing now. Ba. Ba. Baba Baba."; - var repo = this.repository; - var filePath = path.join(repo.workdir(), fileName); - var oldContent; - var stashMessageA = "stash test A"; - var stashMessageB = "stash test B"; - var stashMessageC = "stash test C"; - - function writeAndStash(path, content, message) { + const fileName = "README.md"; + const fileContentA = "Hi. It's me. I'm the dog. My name is the dog."; + const fileContentB = "Everyone likes me. I'm cute."; + const fileContentC = + "I think I will bark at nothing now. Ba. Ba. Baba Baba."; + const repo = this.repository; + const filePath = path.join(repo.workdir(), fileName); + let oldContent; + const stashMessageA = "stash test A"; + const stashMessageB = "stash test B"; + const stashMessageC = "stash test C"; + + const writeAndStash = (path, content, message) => { return fse.writeFile(path, content) - .then(function() { - return Stash.save(repo, repo.defaultSignature(), message, 0); + .then(() => repo.defaultSignature()) + .then((signature) => { + return Stash.save(repo, signature, message, 0); }); - } + }; return fse.readFile(filePath, "utf-8") - .then(function (content) { + .then((content) => { oldContent = content; return writeAndStash(filePath, fileContentA, stashMessageA); }) - .then(function() { + .then(() => { return writeAndStash(filePath, fileContentB, stashMessageB); }) - .then(function() { + .then(() => { return writeAndStash(filePath, fileContentC, stashMessageC); }) - .then(function() { + .then(() => { return fse.readFile(filePath, "utf-8"); }) - .then(function(content) { + .then((content) => { assert.equal(oldContent, content); return Stash.pop(repo, 1); }) - .then(function() { + .then(() => { return fse.readFile(filePath, "utf-8"); }) - .then(function(content) { + .then((content) => { + assert.equal(fileContentB, content); + }); + }); + + it("can partial stash the workdir and pop it", function() { + const repo = this.repository; + + const 
fileName1 = "README.md"; + const fileName2 = "install.js"; + const fileName3 = "LICENSE"; + + const fileContentA = "Hi. It's me. I'm the dog. My name is the dog."; + const fileContentB = "Everyone likes me. I'm cute."; + + let oldContentA; + let oldContentB; + let oldContentC; + + const filePath1 = path.join(repo.workdir(), fileName1); + const filePath2 = path.join(repo.workdir(), fileName2); + const filePath3 = path.join(repo.workdir(), fileName3); + + const options = { + flags: 0, + message: "stast test", + paths: [fileName1, fileName2] + }; + + return fse.readFile(filePath1, "utf-8") + .then((content) => { + oldContentA = content; + return fse.readFile(filePath2, "utf-8"); + }) + .then((content) => { + oldContentB = content; + return fse.readFile(filePath3, "utf-8"); + }) + .then((content) => { + oldContentC = content; + return fse.writeFile(filePath1, fileContentA); + }) + .then(() => fse.writeFile(filePath2, fileContentB)) + .then(() => repo.defaultSignature()) + .then((signature) => { + options.stasher = signature; + return Stash.saveWithOpts(repo, options); + }) + .then(() => fse.readFile(filePath1, "utf-8")) + .then((content) => { + assert.equal(oldContentA, content); + return fse.readFile(filePath2, "utf-8"); + }) + .then((content) => { + assert.equal(oldContentB, content); + return fse.readFile(filePath3, "utf-8"); + }) + .then((content) => { + assert.equal(oldContentC, content); + return Stash.pop(repo, 0); + }) + .then(() => fse.readFile(filePath1, "utf-8")) + .then((content) => { + assert.equal(fileContentA, content); + return fse.readFile(filePath2, "utf-8"); + }) + .then((content) => { assert.equal(fileContentB, content); + return fse.readFile(filePath3, "utf-8"); + }) + .then((content) => { + assert.equal(oldContentC, content); }); }); }); diff --git a/test/tests/submodule.js b/test/tests/submodule.js index f42758b124..2323fa56ca 100644 --- a/test/tests/submodule.js +++ b/test/tests/submodule.js @@ -157,4 +157,38 @@ describe("Submodule", function() 
{ assert.equal(entries[1].path, submodulePath); }); }); + + it("can run sync callback without deadlocking", function() { + var repo = this.workdirRepository; + var submodules = []; + var submoduleCallback = function(submodule, name, payload) { + var submoduleName = submodule.name(); + assert.equal(submoduleName, name); + submodules.push(name); + }; + + return Submodule.foreach(repo, submoduleCallback).then(function() { + assert.equal(submodules.length, 1); + }); + }); + + // 'Submodule.foreach' and 'Submodule.lookup' do work with the repo locked. + // They should work together without deadlocking. + it("can run async callback without deadlocking", function() { + var repo = this.workdirRepository; + var submodules = []; + var submoduleCallback = function(submodule, name, payload) { + var owner = submodule.owner(); + + return Submodule.lookup(owner, name) + .then(function(submodule) { + assert.equal(submodule.name(), name); + submodules.push(name); + }); + }; + + return Submodule.foreach(repo, submoduleCallback).then(function() { + assert.equal(submodules.length, 1); + }); + }); }); diff --git a/test/tests/tag.js b/test/tests/tag.js index 8d2c48f202..21acdcc1bc 100644 --- a/test/tests/tag.js +++ b/test/tests/tag.js @@ -159,11 +159,15 @@ describe("Tag", function() { it("can create a new signed tag with Tag.create and delete it", function() { var name = "created-signed-tag-create"; var repository = this.repository; - var signature = Signature.default(repository); + var signature = null; var commit = null; var commit2 = null; - return repository.getCommit(commitPointedTo) + return Signature.default(repository) + .then(function(signatureResult) { + signature = signatureResult; + return repository.getCommit(commitPointedTo); + }) .then(function(theCommit) { commit = theCommit; return repository.getCommit(commitPointedTo2); @@ -207,20 +211,443 @@ describe("Tag", function() { }); }); + it("can create a Tag buffer", function() { + const targetOid = 
Oid.fromString(commitPointedTo); + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const message = "I'm a teapot"; + + return Tag.createBuffer(repository, name, targetOid, signature, message) + .then((tagBuffer) => { + const lines = tagBuffer.split("\n"); + assert.equal(7, lines.length); + assert.equal(lines[0], `object ${commitPointedTo}`); + assert.equal(lines[1], "type commit"); + assert.equal(lines[2], `tag ${name}`); + assert.equal( + lines[3], + "tagger Shaggy Rogers 987654321 +0130" + ); + assert.equal(lines[4], ""); + assert.equal(lines[5], message); + assert.equal(lines[6], ""); + }); + }); + + it("can create a Tag from a Tag buffer", function() { + const targetOid = Oid.fromString(commitPointedTo); + const otherTargetOid = Oid.fromString(commitPointedTo2); + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const message = "I'm a teapot"; + + let odb; + let buffer; + let otherBuffer; + + return repository.odb() + .then((odbResult) => { + odb = odbResult; + return Tag.createBuffer( + repository, + name, + targetOid, + signature, + message + ); + }) + .then((bufferResult) => { + buffer = bufferResult; + return Tag.createBuffer( + repository, + name, + otherTargetOid, + signature, + message + ); + }) + .then((bufferResult) => { + otherBuffer = bufferResult; + return Tag.createFromBuffer(repository, buffer, 1); + }) + .then((oid) => { + return odb.read(oid); + }) + .then((object) => { + const lines = object.toString().split("\n"); + assert(object.type(), Obj.TYPE.TAG); + assert.equal(7, lines.length); + assert.equal(lines[0], `object ${commitPointedTo}`); + assert.equal(lines[1], "type commit"); + assert.equal(lines[2], `tag ${name}`); + assert.equal( + lines[3], + 
"tagger Shaggy Rogers 987654321 +0130" + ); + assert.equal(lines[4], ""); + assert.equal(lines[5], message); + assert.equal(lines[6], ""); + }) + .then(() => { + // overwriting is okay + return Tag.createFromBuffer(repository, otherBuffer, 1); + }) + .then(() => { + // overwriting is not okay + return Tag.createFromBuffer(repository, buffer, 0); + }) + .then(() => { + return Promise.reject( + new Error("should not be able to create the '" + name + "' tag twice") + ); + }, + () => { + return Promise.resolve(); + }); + }); + + describe("createWithSignature and extractSignature", function() { + it( + "can create a tag with a signature and extract the signature", + function() { + var targetCommit; + var otherTargetCommit; + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const signatureLines = [ + "-----BEGIN PGP SIGNATURE-----", + "iQIzBAABCAAdFiEEKdxGpJ93wnkLaBKfURjJKedOfEMFAlxR4JUACgkQURjJKedO", + "fEN+8A//cXmkRmhzQMdTEdrxty7tVKQ7lVhL7r7e+cB84hO7WrDn8549c7/Puflu", + "idanWfyoAEMSNWDgY84lx/t3I3YYKXsLDPT93HiMhCXmPVZcfLxlARRL1rrNZV4q", + "L9hhqb9bFrRNBn6YebhygeLXLHlDKEZzx8W9jnDLU8Px8UTkwdQIDnPDfT7UOPPU", + "MYDgP3OwWwoG8dUlZXaHjtFz29wPlJo177MwdLYwn4zpEIysoY1ev5IKWD+LPW4g", + "vdQnaK1x3dozmG8YLUZw5iW7ap9DpahbAGQgdy1z1ypiNUjNuhaP8zkG1ci6X88N", + "6MIoQ+YqfowRJJTIr1lzssxsRI1syjfS6smnI4ZNE6S+6mIKN96ES2OZF+rn4xnD", + "PofR9Qh2gPq++ULriPE/cX7ZkZ0/ZDZGDfIGvricB8JEJhISZn/VMX/KScJs+rFq", + "KWN5Au6Uc2pEqeq5OP4y2k0QUmKQT9sh9OepnPmfqF8hG6wI8nM67jT/FEOcpr0v", + "qoN2NRXrcq3iZAp07AGq9IdpYhBcEW7MFmOcNt+Zb8SbTMp6DawnREg9xzz1SIkZ", + "Cdp1XoJ6mkVvzBB4T/Esp7j1VztinTX2PpX7C1CE5LC76UfCiEjEWOmWrVuPuA5a", + "oRrJvgPJg8gpVj04r2m8nvUK1gwhxg9ZB+SK+nd3OAd0dnbJwTE=", + "=dW3g", + "-----END PGP SIGNATURE-----" + ]; + const message = "I'm a teapot"; + const signingCallback = (message) => ({ + code: NodeGit.Error.CODE.OK, + signedData: 
signatureLines.join("\n") + }); + + let odb; + let oid; + let object; + + return repository.getCommit(commitPointedTo).then((commit) => { + targetCommit = commit; + return repository.getCommit(commitPointedTo2); + }).then((commit) => { + otherTargetCommit = commit; + return repository.odb(); + }).then((odbResult) => { + odb = odbResult; + + return Tag.createWithSignature( + repository, + name, + targetCommit, + signature, + message, + 1, + signingCallback + ); + }) + .then((oidResult) => { + oid = oidResult; + return odb.read(oid); + }) + .then((objectResult) => { + object = objectResult; + const lines = object.toString().split("\n"); + assert(object.type(), Obj.TYPE.TAG); + assert.equal(signatureLines.length + 7, lines.length); + assert.equal(lines[0], `object ${commitPointedTo}`); + assert.equal(lines[1], "type commit"); + assert.equal(lines[2], `tag ${name}`); + assert.equal( + lines[3], + "tagger Shaggy Rogers 987654321 +0130" + ); + assert.equal(lines[4], ""); + assert.equal(lines[5], message); + for (let i = 6; i < 6 + signatureLines.length; i++) { + assert.equal(lines[i], signatureLines[i - 6]); + } + assert.equal(lines[6 + signatureLines.length], ""); + + return Tag.lookup(repository, oid); + }) + .then((tag) => { + return tag.extractSignature(); + }) + .then((tagSignature) => { + assert.equal(tagSignature, signatureLines.join("\n")); + }) + .then(() => { + // overwriting is okay + return Tag.createWithSignature( + repository, + name, + targetCommit, + signature, + message, + 1, + signingCallback + ); + }) + .then(() => { + // overwriting is not okay + return Tag.createWithSignature( + repository, + name, + otherTargetCommit, + signature, + message, + 0, + signingCallback + ); + }) + .then(() => { + return Promise.reject( + new Error( + "should not be able to create the '" + name + "' tag twice" + ) + ); + }, + () => { + return Promise.resolve(); + }); + } + ); + + it("can optionally skip the signing process", function() { + var targetCommit; + var 
otherTargetCommit; + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const message = "I'm a teapot"; + const signingCallback = () => ({ + code: NodeGit.Error.CODE.PASSTHROUGH + }); + + let odb; + let oid; + let object; + + return repository.getCommit(commitPointedTo).then((commit) => { + targetCommit = commit; + return repository.getCommit(commitPointedTo2); + }).then((commit) => { + otherTargetCommit = commit; + return repository.odb(); + }).then((odbResult) => { + odb = odbResult; + + return Tag.createWithSignature( + repository, + name, + targetCommit, + signature, + message, + 1, + signingCallback + ); + }) + .then((oidResult) => { + oid = oidResult; + return odb.read(oid); + }) + .then((objectResult) => { + object = objectResult; + const lines = object.toString().split("\n"); + assert(object.type(), Obj.TYPE.TAG); + assert.equal(7, lines.length); + assert.equal(lines[0], `object ${commitPointedTo}`); + assert.equal(lines[1], "type commit"); + assert.equal(lines[2], `tag ${name}`); + assert.equal( + lines[3], + "tagger Shaggy Rogers 987654321 +0130" + ); + assert.equal(lines[4], ""); + assert.equal(lines[5], message); + assert.equal(lines[6], ""); + + return Tag.lookup(repository, oid); + }) + .then((tag) => { + return tag.extractSignature(); + }) + .then(function() { + assert.fail("Tag should not have been signed."); + }, function(error) { + if (error && error.message === "this tag is not signed") { + return; + } + + throw error; + }) + .then(() => { + // overwriting is okay + return Tag.createWithSignature( + repository, + name, + targetCommit, + signature, + message, + 1, + signingCallback + ); + }) + .then(() => { + // overwriting is not okay + return Tag.createWithSignature( + repository, + name, + otherTargetCommit, + signature, + message, + 0, + signingCallback + ); + }) + .then(() => { + return 
Promise.reject( + new Error("should not be able to create the '" + name + "' tag twice") + ); + }, + () => { + return Promise.resolve(); + }); + }); + + it("will throw if signing callback returns an error code", function() { + var targetCommit; + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const message = "I'm a teapot"; + const signingCallback = () => ({ + code: NodeGit.Error.CODE.ERROR + }); + + + return repository.getCommit(commitPointedTo).then((commit) => { + targetCommit = commit; + return Tag.createWithSignature( + repository, + name, + targetCommit, + signature, + message, + 1, + signingCallback + ); + }).then(function() { + assert.fail("Should not have been able to create tag"); + }, function(error) { + if (error && error.errno === NodeGit.Error.CODE.ERROR) { + return; + } + throw error; + }); + }); + }); + + it("will show a deprecation warning if createWithSignature use oid instead object", function() { + var targetCommit; + const name = "created-signed-tag-annotationCreate"; + const repository = this.repository; + const signature = Signature.create( + "Shaggy Rogers", + "shaggy@mystery.com", + 987654321, + 90 + ); + const message = "I'm a teapot"; + const signingCallback = () => ({ + code: NodeGit.Error.CODE.ERROR + }); + + + return repository.getCommit(commitPointedTo).then((commit) => { + targetCommit = commit; + return Tag.createWithSignature( + repository, + name, + targetCommit.id(), + signature, + message, + 1, + signingCallback + ); + }).then(function() { + assert.fail("Should not have been able to create tag"); + }, function(error) { + if (error && error.errno === NodeGit.Error.CODE.ERROR) { + return; + } + throw error; + }); + }); + it("can create a new signed tag with Tag.annotationCreate", function() { - var oid = Oid.fromString(commitPointedTo); + var targetCommit; var name = 
"created-signed-tag-annotationCreate"; var repository = this.repository; - var signature = Signature.default(repository); + var signature = null; var odb = null; - return repository.odb() + return repository.getCommit(commitPointedTo).then((commit) => { + targetCommit = commit; + return Signature.default(repository); + }).then(function(signatureResult) { + signature = signatureResult; + return repository.odb(); + }) .then(function(theOdb) { odb = theOdb; }) .then(function() { return Tag.annotationCreate( - repository, name, oid, signature, tagMessage); + repository, name, targetCommit, signature, tagMessage); }) .then(function(oid) { return odb.read(oid); diff --git a/test/tests/thread_safety.js b/test/tests/thread_safety.js deleted file mode 100644 index 257c2d51ea..0000000000 --- a/test/tests/thread_safety.js +++ /dev/null @@ -1,65 +0,0 @@ -var assert = require("assert"); -var path = require("path"); -var local = path.join.bind(path, __dirname); - -describe("ThreadSafety", function() { - var NodeGit = require("../../"); - var Repository = NodeGit.Repository; - - var reposPath = local("../repos/workdir"); - - beforeEach(function() { - var test = this; - - return Repository.open(reposPath) - .then(function(repo) { - test.repository = repo; - return repo.refreshIndex(); - }) - .then(function(index) { - test.index = index; - }); - }); - - it("can enable and disable thread safety", function() { - var originalValue = NodeGit.getThreadSafetyStatus(); - - NodeGit.enableThreadSafety(); - assert.equal(NodeGit.THREAD_SAFETY.ENABLED, - NodeGit.getThreadSafetyStatus()); - - NodeGit.setThreadSafetyStatus(NodeGit.THREAD_SAFETY.ENABLED_FOR_ASYNC_ONLY); - assert.equal(NodeGit.THREAD_SAFETY.ENABLED_FOR_ASYNC_ONLY, - NodeGit.getThreadSafetyStatus()); - - NodeGit.setThreadSafetyStatus(NodeGit.THREAD_SAFETY.DISABLED); - assert.equal(NodeGit.THREAD_SAFETY.DISABLED, - NodeGit.getThreadSafetyStatus()); - - NodeGit.setThreadSafetyStatus(originalValue); - }); - - it("can lock something 
and cleanup mutex", function() { - var diagnostics = NodeGit.getThreadSafetyDiagnostics(); - var originalCount = diagnostics.storedMutexesCount; - // call a sync method to guarantee that it stores a mutex, - // and that it will clean up the mutex in a garbage collection cycle - this.repository.headDetached(); - - diagnostics = NodeGit.getThreadSafetyDiagnostics(); - switch(NodeGit.getThreadSafetyStatus()) { - case NodeGit.THREAD_SAFETY.ENABLED: - // this is a fairly vague test - it just tests that something - // had a mutex created for it at some point (i.e., the thread safety - // code is not completely dead) - assert.ok(diagnostics.storedMutexesCount > 0); - break; - case NodeGit.THREAD_SAFETY.ENABLED_FOR_ASYNC_ONLY: - assert.equal(originalCount, diagnostics.storedMutexesCount); - break; - - case NodeGit.THREAD_SAFETY.DISABLED: - assert.equal(0, diagnostics.storedMutexesCount); - } - }); -}); diff --git a/test/tests/tree.js b/test/tests/tree.js index e5272116d3..052459a80b 100644 --- a/test/tests/tree.js +++ b/test/tests/tree.js @@ -31,11 +31,29 @@ describe("Tree", function() { }); it("gets an entry by name", - function(done) { - this.commit.getTree().then(function(tree) { + function() { + return this.commit.getTree().then(function(tree) { var entry = tree.entryByName("README.md"); assert(entry); - }).done(done); + }); + }); + + it("updates a tree", function () { + var repo = this.existingRepo; + var update = new NodeGit.TreeUpdate(); + update.action = NodeGit.Tree.UPDATE.REMOVE; + update.path = "README.md"; + return this.commit.getTree().then(function(tree) { + return tree.createUpdated(repo, 1, [update]); + }) + .then(function(treeOid) { + return repo.getTree(treeOid); + }) + .then(function(updatedTree) { + assert.throws(function () { + updatedTree.entryByName("README.md"); + }); + }); }); it("walks its entries and returns the same entries on both progress and end", @@ -94,4 +112,13 @@ describe("Tree", function() { ); }); }); + + it("get all paths from a tree", 
async function () { + const tree = await this.commit.getTree(); + const paths = await tree.getAllFilepaths(); + assert.equal(paths.length, 512); + assert.equal(paths[0], ".gitignore"); + assert.equal(paths[511], "wscript"); + }); + }); diff --git a/test/tests/tree_entry.js b/test/tests/tree_entry.js index 086a4f8d2b..f62f374f04 100644 --- a/test/tests/tree_entry.js +++ b/test/tests/tree_entry.js @@ -61,7 +61,7 @@ describe("TreeEntry", function() { }); }); - it("provides the full path when the entry came from a tree", function(done) { + it("provides the full path when the entry came from a tree", function() { var testTree = function(tree, _dir) { var dir = _dir || "", testPromises = []; @@ -82,10 +82,7 @@ describe("TreeEntry", function() { }; return this.commit.getTree() - .then(testTree) - .done(function() { - done(); - }); + .then(testTree); }); it("provides the blob representation of the entry", function() { diff --git a/test/tests/worker.js b/test/tests/worker.js new file mode 100644 index 0000000000..f39e19e6da --- /dev/null +++ b/test/tests/worker.js @@ -0,0 +1,216 @@ +const path = require("path"); +const assert = require("assert"); +const fse = require("fs-extra"); +const local = path.join.bind(path, __dirname); +const NodeGit = require("../../"); + +let filterName = "psuedo_filter"; +let Worker; + +try { + Worker = require("worker_threads").Worker; +} catch (e) {} + +if (Worker) { + describe("Worker", function() { + const clonePath = local("../repos/clone"); + + // Set a reasonable timeout here now that our repository has grown. 
+ this.timeout(30000); + + beforeEach(function() { + return fse.remove(clonePath).catch(function(err) { + console.log(err); + + throw err; + }); + }); + + afterEach(function() { + return NodeGit.FilterRegistry.unregister(filterName) + .catch(function(error) { + if (error === NodeGit.Error.CODE.ERROR) { + throw new Error("Cannot unregister filter"); + } + }); + }); + + it("can perform basic functionality via worker thread", function(done) { + const workerPath = local("../utils/worker.js"); + const worker = new Worker(workerPath, { + workerData: { + clonePath, + url: "https://github.com/nodegit/test.git" + } + }); + worker.on("message", (message) => { + switch (message) { + case "init": + break; + case "success": + done(); + break; + case "failure": + assert.fail(); + break; + } + }); + worker.on("error", () => assert.fail()); + worker.on("exit", (code) => { + if (code !== 0) { + assert.fail(); + } + }); + }); + + for (let i = 0; i < 5; ++i) { + // disabled until we can address flakiness + it.skip(`can kill worker thread while in use #${i}`, function(done) { // jshint ignore:line + const workerPath = local("../utils/worker.js"); + const worker = new Worker(workerPath, { + workerData: { + clonePath, + url: "https://github.com/nodegit/test.git" + } + }); + worker.on("message", (message) => { + switch (message) { + case "init": + setTimeout(() => { worker.terminate(); }, 500); + break; + case "success": + assert.fail(); + break; + case "failure": + assert.fail(); + break; + } + }); + worker.on("error", () => assert.fail()); + worker.on("exit", (code) => { + if (code === 1) { + done(); + } else { + assert.fail(); + } + }); + }); + } + + // NOTE: first try was to build a test measuring memory used, checking + // that memory allocated by objects was being freed, but it was problematic + // to obtain the memory freed by a different context (a worker) after the + // context was gone, and the data in the tests wasn't consistent. 
+ // So instead this test checks that the count of objects created/destroyed + // during the test match the count of objects being tracked by the + // nodegit::Context, which will be destroyed on context shutdown. To check + // that they are actually being freed can be done with a debugger/profiler. + it("can track objects to free on context shutdown", function(done) { + let testOk; + const workerPath = local("../utils/worker_context_aware.js"); + const worker = new Worker(workerPath, { + workerData: { + clonePath, + url: "https://github.com/nodegit/test.git" + } + }); + worker.on("message", (message) => { + switch (message) { + case "numbersMatch": + testOk = true; + worker.terminate(); + break; + case "numbersDoNotMatch": + testOk = false; + worker.terminate(); + break; + case "failure": + assert.fail(); + break; + } + }); + worker.on("error", () => assert.fail()); + worker.on("exit", (code) => { + if (code === 1 && testOk === true) { + done(); + } + else { + assert.fail(); + } + }); + }); + + // This tests that while calling filter's apply callbacks and the worker + // is terminated, node exits gracefully. To make sure we terminate the + // worker during a checkout, continuous checkouts will be running in a loop. 
+ it("can kill worker thread while doing a checkout and exit gracefully", function(done) { // jshint ignore:line + const workerPath = local("../utils/worker_checkout.js"); + const worker = new Worker(workerPath, { + workerData: { + clonePath, + url: "https://github.com/nodegit/test.git" + } + }); + worker.on("message", (message) => { + switch (message) { + case "init": + // give enough time for the worker to start applying the filter + // during continuous checkouts + setTimeout(() => { worker.terminate(); }, 10000); + break; + case "success": + assert.fail(); + break; + case "failure": + assert.fail(); + break; + } + }); + worker.on("error", () => assert.fail()); + worker.on("exit", (code) => { + if (code == 1) { + done(); + } else { + assert.fail(); + } + }); + }); + + // This tests that after calling filter's apply callbacks and the worker + // is terminated, there will be no memory leaks. + it("can track objects to free on context shutdown after multiple checkouts", function(done) { // jshint ignore:line + let testOk; + const workerPath = local("../utils/worker_context_aware_checkout.js"); + const worker = new Worker(workerPath, { + workerData: { + clonePath, + url: "https://github.com/nodegit/test.git" + } + }); + worker.on("message", (message) => { + switch (message) { + case "numbersMatch": + testOk = true; + worker.terminate(); + break; + case "numbersDoNotMatch": + testOk = false; + worker.terminate(); + break; + case "failure": + assert.fail(); + break; + } + }); + worker.on("error", () => assert.fail()); + worker.on("exit", (code) => { + if (code === 1 && testOk === true) { + done(); + } + else { + assert.fail(); + } + }); + }); + }); +} diff --git a/test/utils/loop_checkout.js b/test/utils/loop_checkout.js new file mode 100644 index 0000000000..f6e6c07360 --- /dev/null +++ b/test/utils/loop_checkout.js @@ -0,0 +1,91 @@ +const fse = require("fs-extra"); +const path = require("path"); +const NodeGit = require("../../"); + +const getDirExtFiles = 
function(dir, ext, done) { + let results = []; + fse.readdir(dir, function(err, list) { + if (err) { + return done(err); + } + let i = 0; + (function next() { + let file = list[i++]; + if (!file) { + return done(null, results); + } + file = path.resolve(dir, file); + fse.stat(file, function(err, stat) { + if (stat && stat.isDirectory()) { + getDirExtFiles(file, ext, function(err, res) { + results = results.concat(res); + next(); + }); + } else { + if (path.extname(file) == ".".concat(ext)) { + results.push(file); + } + next(); + } + }); + })(); + }); +}; + +const getDirFilesToChange = function(dir, ext) { + return new Promise(function(resolve, reject) { + getDirExtFiles(dir, ext, function(err, results) { + if (err) { + reject(err); + } + resolve(results); + }); + }); +}; + +// Changes the content of files with extension 'ext' +// in directory 'dir' recursively. +// Returns relative file paths +const changeDirExtFiles = function (dir, ext, newText) { + let filesChanged = []; + return getDirFilesToChange(dir, ext) + .then(function(filesWithExt) { + filesWithExt.forEach(function(file) { + fse.writeFile( + file, + newText, + { encoding: "utf-8" } + ); + filesChanged.push(path.relative(dir, file)); + }); + return filesChanged; + }) + .catch(function(err) { + throw new Error("Error getting files with extension .".concat(ext)); + }); +}; + +// 'times' to limit the number of iterations in the loop. +// 0 means no limit. +const loopingCheckoutHead = async function(repoPath, repo, times) { + const text0 = "Text0: changing content to trigger checkout"; + const text1 = "Text1: changing content to trigger checkout"; + + let iteration = 0; + for (let i = 0; true; i = ++i%2) { + const newText = (i == 0) ? 
text0 : text1; + const jsRelativeFilePahts = await changeDirExtFiles(repoPath, "js", newText); // jshint ignore:line + let checkoutOpts = { + checkoutStrategy: NodeGit.Checkout.STRATEGY.FORCE, + paths: jsRelativeFilePahts + }; + await NodeGit.Checkout.head(repo, checkoutOpts); + + if (++iteration == times) { + break; + } + } + return; +}; + +module.exports = loopingCheckoutHead; \ No newline at end of file diff --git a/test/utils/worker.js b/test/utils/worker.js new file mode 100644 index 0000000000..6f2d218408 --- /dev/null +++ b/test/utils/worker.js @@ -0,0 +1,38 @@ +const { + isMainThread, + parentPort, + workerData +} = require("worker_threads"); +const assert = require("assert"); +const NodeGit = require("../../"); + +if (isMainThread) { + throw new Error("Must be run via worker thread"); +} + +parentPort.postMessage("init"); + +const { clonePath, url } = workerData; +const opts = { + fetchOpts: { + callbacks: { + certificateCheck: () => 0 + } + } +}; + +let repository; +return NodeGit.Clone(url, clonePath, opts).then((_repository) => { + repository = _repository; + assert.ok(repository instanceof NodeGit.Repository); + return repository.index(); +}).then((index) => { + assert.ok(index instanceof NodeGit.Index); + return repository.getRemoteNames(); +}).then((remotes) => { + assert.ok(Array.isArray(remotes)); + return repository.getCurrentBranch(); +}).then((branch) => { + assert.ok(branch instanceof NodeGit.Reference); + parentPort.postMessage("success"); +}).catch(() => parentPort.postMessage("failure")); diff --git a/test/utils/worker_checkout.js b/test/utils/worker_checkout.js new file mode 100644 index 0000000000..adfb7aa7c7 --- /dev/null +++ b/test/utils/worker_checkout.js @@ -0,0 +1,51 @@ +const { + isMainThread, + parentPort, + workerData +} = require("worker_threads"); +const assert = require("assert"); +const NodeGit = require("../../"); +const loopingCheckoutHead = require("./loop_checkout.js"); + +if (isMainThread) { + throw new Error("Must be run 
via worker thread"); +} + +parentPort.postMessage("init"); + +const { clonePath, url } = workerData; +const cloneOpts = { + fetchOpts: { + callbacks: { + certificateCheck: () => 0 + } + } +}; + +let repository; +let filterName = "psuedo_filter"; +let applyCallbackResult = 1; + +return NodeGit.Clone(url, clonePath, cloneOpts) +.then(function(_repository) { + repository = _repository; + assert.ok(repository instanceof NodeGit.Repository); + return NodeGit.FilterRegistry.register(filterName, { + apply: function() { + applyCallbackResult = 0; + }, + check: function() { + return NodeGit.Error.CODE.OK; + } + }, 0); +}) +.then(function(result) { + assert.strictEqual(result, NodeGit.Error.CODE.OK); + return loopingCheckoutHead(clonePath, repository, 0); +}).then(function() { + assert.strictEqual(applyCallbackResult, 0); + parentPort.postMessage("success"); +}) +.catch((err) => { + parentPort.postMessage("failure"); +}); \ No newline at end of file diff --git a/test/utils/worker_context_aware.js b/test/utils/worker_context_aware.js new file mode 100644 index 0000000000..b7784fdaef --- /dev/null +++ b/test/utils/worker_context_aware.js @@ -0,0 +1,74 @@ +const { + isMainThread, + parentPort, + workerData +} = require("worker_threads"); +const garbageCollect = require("./garbage_collect.js"); +const assert = require("assert"); +const NodeGit = require("../../"); +const { promisify } = require("util"); + +if (isMainThread) { + throw new Error("Must be run via worker thread"); +} + +const { clonePath, url } = workerData; +const opts = { + fetchOpts: { + callbacks: { + certificateCheck: () => 0 + } + } +}; + +let repository; +const oid = "fce88902e66c72b5b93e75bdb5ae717038b221f6"; + +return NodeGit.Clone(url, clonePath, opts) +.then((_repository) => { + repository = _repository; + assert.ok(repository instanceof NodeGit.Repository); + return repository.getCommit(oid); +}).then((commit) => { + assert.ok(commit instanceof NodeGit.Commit); + var historyCount = 0; + var history = 
commit.history(); + + history.on("commit", function(commit) { + // Number of commits is known to be higher than 200 + if (++historyCount == 200) { + // Tracked objects must work too when the Garbage Collector is triggered + garbageCollect(); + + // Count total of objects left after being created/destroyed + const freeingCount = + NodeGit.Cert.getNonSelfFreeingConstructedCount() + + NodeGit.Repository.getSelfFreeingInstanceCount() + + NodeGit.Commit.getSelfFreeingInstanceCount() + + NodeGit.Oid.getSelfFreeingInstanceCount() + + NodeGit.Revwalk.getSelfFreeingInstanceCount(); + + const numberOfTrackedObjects = NodeGit.getNumberOfTrackedObjects(); + + if (freeingCount === numberOfTrackedObjects) { + parentPort.postMessage("numbersMatch"); + } + else { + parentPort.postMessage("numbersDoNotMatch"); + } + } + }); + + history.on("end", function(commits) { + // Test should not get this far + parentPort.postMessage("failure"); + }); + + history.on("error", function(err) { + assert.ok(false); + }); + + history.start(); + + return promisify(setTimeout)(50000); +}).catch(() => parentPort.postMessage("failure")); \ No newline at end of file diff --git a/test/utils/worker_context_aware_checkout.js b/test/utils/worker_context_aware_checkout.js new file mode 100644 index 0000000000..6562915a49 --- /dev/null +++ b/test/utils/worker_context_aware_checkout.js @@ -0,0 +1,66 @@ +const { + isMainThread, + parentPort, + workerData +} = require("worker_threads"); +const garbageCollect = require("./garbage_collect.js"); +const assert = require("assert"); +const NodeGit = require("../../"); +const loopingCheckoutHead = require("./loop_checkout.js"); +const { promisify } = require("util"); + +if (isMainThread) { + throw new Error("Must be run via worker thread"); +} + +const { clonePath, url } = workerData; +const cloneOpts = { + fetchOpts: { + callbacks: { + certificateCheck: () => 0 + } + } +}; + +let repository; +let filterName = "psuedo_filter"; +let applyCallbackResult = 1; + +return 
NodeGit.Clone(url, clonePath, cloneOpts) +.then(function(_repository) { + repository = _repository; + assert.ok(repository instanceof NodeGit.Repository); + return NodeGit.FilterRegistry.register(filterName, { + apply: function() { + applyCallbackResult = 0; + }, + check: function() { + return NodeGit.Error.CODE.OK; + } + }, 0); +}) +.then(function(result) { + assert.strictEqual(result, NodeGit.Error.CODE.OK); + return loopingCheckoutHead(clonePath, repository, 10); +}).then(function() { + assert.strictEqual(applyCallbackResult, 0); + // Tracked objects must work too when the Garbage Collector is triggered + garbageCollect(); + + // Count total of objects left after being created/destroyed + const freeingCount = + NodeGit.Cert.getNonSelfFreeingConstructedCount() + + NodeGit.FilterSource.getNonSelfFreeingConstructedCount() + + NodeGit.Buf.getNonSelfFreeingConstructedCount() + + NodeGit.Repository.getSelfFreeingInstanceCount(); + + const numberOfTrackedObjects = NodeGit.getNumberOfTrackedObjects(); + + if (freeingCount === numberOfTrackedObjects) { + parentPort.postMessage("numbersMatch"); + } + else { + parentPort.postMessage("numbersDoNotMatch"); + } + return promisify(setTimeout)(50000); +}).catch((err) => parentPort.postMessage("failure")); \ No newline at end of file diff --git a/utils/README.md b/utils/README.md index 1773e9979e..cf046f05b8 100644 --- a/utils/README.md +++ b/utils/README.md @@ -5,17 +5,5 @@ #### buildFlags Determines how NodeGit should build. Use `BUILD_ONLY` environment variable to build from source. - ## discoverOpenSSLDistros - Crawls a series of static URLS on the [Conan package manager](https://conan.io/) for the [latest release of OpenSSL](https://bintray.com/conan-community/conan/OpenSSL%3Aconan#files/conan%2FOpenSSL%2F1.1.0i) (1.1.0i at the time of writing). It acquires URLS for releases of statically linked binaries and header files of OpenSSL for Mac and Windows. The provided binaries are compiled on: - - * Mac: clang-8.1 or clang-9. 
- * Windows: vs12, vs14, vs15 - - The discovered distributions are written into `vendor/static_config/openssl_distributions.json`. This script does not need to be run unless you are updating the version of OpenSSL to build against. - ## acquireOpenSSL - Download the OpenSSL binaries and headers applicable to the current OS for the latest compiler version (clang-9/vs14). Uses links from `vendor/static_config/openssl_distributions.json`. - - TODO: - * Make the script pull the debug versions if node-gyp is building in debug mode - * Make the script pull down a version of the binaries that matches the system compiler + Download and compile OpenSSL. diff --git a/utils/acquireOpenSSL.js b/utils/acquireOpenSSL.js deleted file mode 100644 index 1c1ca29dfa..0000000000 --- a/utils/acquireOpenSSL.js +++ /dev/null @@ -1,90 +0,0 @@ -const fse = require("fs-extra"); -const path = require("path"); -const R = require("ramda"); -const request = require("request-promise-native"); -const stream = require("stream"); -const tar = require("tar-fs"); -const zlib = require("zlib"); - -const vendorPath = path.resolve(__dirname, "..", "vendor"); -const distrosFilePath = path.join(vendorPath, "static_config", "openssl_distributions.json"); -const extractPath = path.join(vendorPath, "openssl"); - -const getOSName = () => { - if (process.platform === "win32") { - if (process.arch === "x64") { - return "win64"; - } else { - return "win32"; - } - } else if (process.platform === "darwin") { - return "macOS"; - } else { - // We only discover distros for Mac and Windows. We don't care about any other OS. - return "unknown"; - } -}; - -const getCompilerVersion = () => { - // TODO: Get actual compiler version. For now, just assume latest compiler for distros in openssl_distributions.js - const osName = getOSName(); - if (osName === "win32" || osName === "win64") { - return "vs14"; - } else if (osName === "macOS") { - return "clang-9"; - } else { - // We only discover distros for Mac and Windows. 
We don't care about any other OS. - return "unknown"; - } -}; - -// TODO: Determine if we are GYPing in Debug -const getIsDebug = () => false; - -const getMatchingDistributionName = () => - `${getOSName()}-${getCompilerVersion()}-static${getIsDebug() ? "-debug" : "-release"}`; - -const getDistributionsConfig = () => - fse.readFile(distrosFilePath, "utf8") - .then(JSON.parse); - -const getDistrbutionURLFromConfig = (config) => { - const distName = getMatchingDistributionName(); - const distURL = R.propOr(null, distName, config); - - if (!distURL) { - return Promise.reject(new Error("No matching distribution for this operating system")); - } - return Promise.resolve(distURL); -}; - -const fetchFileFromURL = (distUrl) => request({ - method: "GET", - uri: distUrl, - encoding: null, - gzip: true -}); - -const extractFile = (body) => new Promise((resolve, reject) => { - const streamableBody = new stream.Readable(); - streamableBody.push(body); - streamableBody.push(null); - streamableBody - .pipe(zlib.createGunzip()) - .on("error", reject) - .pipe(tar.extract(extractPath)) - .on("error", reject) - .on("close", resolve); -}); - -const acquireOpenSSL = () => - getDistributionsConfig() - .then(getDistrbutionURLFromConfig) - .then(fetchFileFromURL) - .then(extractFile) - .catch((e) => { - console.error(e); - process.exit(1); - }); - -acquireOpenSSL(); diff --git a/utils/acquireOpenSSL.mjs b/utils/acquireOpenSSL.mjs new file mode 100644 index 0000000000..930b5ab33c --- /dev/null +++ b/utils/acquireOpenSSL.mjs @@ -0,0 +1,488 @@ +import crypto from "crypto"; +import { spawn } from "child_process"; +import execPromise from "./execPromise.js"; +import got from "got"; +import path from "path"; +import stream from "stream"; +import tar from "tar-fs"; +import zlib from "zlib"; +import { createWriteStream, promises as fs } from "fs"; +import { performance } from "perf_hooks"; +import { promisify } from "util"; + +import { hostArch, targetArch } from "./buildFlags.js"; + +const 
pipeline = promisify(stream.pipeline); + +import packageJson from '../package.json' with { type: "json" }; + +const OPENSSL_VERSION = "3.0.18"; +const win32BatPath = path.join(import.meta.dirname, "build-openssl.bat"); +const vendorPath = path.resolve(import.meta.dirname, "..", "vendor"); +const opensslPatchPath = path.join(vendorPath, "patches", "openssl"); +const extractPath = path.join(vendorPath, "openssl"); + +const exists = (filePath) => fs.stat(filePath).then(() => true).catch(() => false); + +const pathsToIncludeForPackage = [ + "include", "lib" +]; + +const getOpenSSLSourceUrl = (version) => `https://www.openssl.org/source/openssl-${version}.tar.gz`; +const getOpenSSLSourceSha256Url = (version) => `${getOpenSSLSourceUrl(version)}.sha256`; + +class HashVerify extends stream.Transform { + constructor(algorithm, onFinal) { + super(); + this.onFinal = onFinal; + this.hash = crypto.createHash(algorithm); + } + + _transform(chunk, encoding, callback) { + this.hash.update(chunk, encoding); + callback(null, chunk); + } + + _final(callback) { + const digest = this.hash.digest("hex"); + const onFinalResult = this.onFinal(digest); + callback(onFinalResult); + } +} + +const makeHashVerifyOnFinal = (expected) => (digest) => { + const digestOk = digest === expected; + return digestOk + ? 
null + : new Error(`Digest not OK: ${digest} !== ${this.expected}`); +}; + +// currently this only needs to be done on linux +const applyOpenSSLPatches = async (buildCwd, operatingSystem) => { + try { + await fs.access(opensslPatchPath); + + for (const patchFilename of await fs.readdir(opensslPatchPath)) { + const patchTarget = patchFilename.split("-")[1]; + if (patchFilename.split(".").pop() === "patch" && (patchTarget === operatingSystem || patchTarget === "all")) { + console.log(`applying ${patchFilename}`); + await execPromise(`patch -up0 -i ${path.join(opensslPatchPath, patchFilename)}`, { + cwd: buildCwd + }, { pipeOutput: true }); + } + } + } catch(e) { + if (e.code === "ENOENT") { + // no patches to apply + return; + } + + console.log("Patch application failed: ", e); + throw e; + } +} + +const buildDarwin = async (buildCwd, macOsDeploymentTarget) => { + if (!macOsDeploymentTarget) { + throw new Error("Expected macOsDeploymentTarget to be specified"); + } + + const buildConfig = targetArch === "x64" ? 
"darwin64-x86_64-cc" : "darwin64-arm64-cc"; + + const configureArgs = [ + buildConfig, + // speed up ecdh on little-endian platforms with 128bit int support + "enable-ec_nistp_64_gcc_128", + // compile static libraries + "no-shared", + // disable ssl2, ssl3, and compression + "no-ssl2", + "no-ssl3", + "no-comp", + // disable tty ui since it fails a bunch of tests on GHA runners and we're just gonna link anyways + "no-ui-console", + // set install directory + `--prefix="${extractPath}"`, + `--openssldir="${extractPath}"`, + // set macos version requirement + `-mmacosx-version-min=${macOsDeploymentTarget}` + ]; + + await execPromise(`./Configure ${configureArgs.join(" ")}`, { + cwd: buildCwd + }, { pipeOutput: true }); + + await applyOpenSSLPatches(buildCwd, "darwin"); + + // only build the libraries, not the fuzzer or apps + await execPromise("make build_libs", { + cwd: buildCwd + }, { pipeOutput: true }); + + await execPromise("make test", { + cwd: buildCwd + }, { pipeOutput: true }); + + await execPromise("make install_sw", { + cwd: buildCwd, + maxBuffer: 10 * 1024 * 1024 // we should really just use spawn + }, { pipeOutput: true }); +}; + +const buildLinux = async (buildCwd) => { + const buildConfig = targetArch === "x64" ? 
"linux-x86_64" : "linux-aarch64"; + + const configureArgs = [ + buildConfig, + // disable ssl3, and compression + "no-ssl3", + "no-comp", + // set install directory + `--prefix="${extractPath}"`, + `--openssldir="${extractPath}"`, + "--libdir=lib", + ]; + await execPromise(`./Configure ${configureArgs.join(" ")}`, { + cwd: buildCwd + }, { pipeOutput: true }); + + await applyOpenSSLPatches(buildCwd, "linux"); + + // only build the libraries, not the fuzzer or apps + await execPromise("make build_libs", { + cwd: buildCwd, + maxBuffer: 10 * 1024 * 1024 + }, { pipeOutput: true }); + + if (hostArch === targetArch) { + await execPromise("make test", { + cwd: buildCwd, + maxBuffer: 10 * 1024 * 1024 + }, { pipeOutput: true }); + } + + // only install software, not the docs + await execPromise("make install_sw", { + cwd: buildCwd, + maxBuffer: 10 * 1024 * 1024 // we should really just use spawn + }, { pipeOutput: true }); +}; + +const buildWin32 = async (buildCwd) => { + let vcvarsallPath = undefined; + + if (process.env.npm_config_vcvarsall_path && await exists(process.env.npm_config_vcvarsall_path)) { + vcvarsallPath = process.env.npm_config_vcvarsall_path; + } else { + const potentialMsvsPaths = []; + + // GYP_MSVS_OVERRIDE_PATH is set by node-gyp so this should cover most cases + if (process.env.GYP_MSVS_OVERRIDE_PATH) { + potentialMsvsPaths.push(process.env.GYP_MSVS_OVERRIDE_PATH); + } + + const packageTypes = ["BuildTools", "Community", "Professional", "Enterprise"]; + const versions = ["2022", "2019"] + + const computePossiblePaths = (parentPath) => { + let possiblePaths = [] + for (const packageType of packageTypes) { + for (const version of versions) { + possiblePaths.push(path.join(parentPath, version, packageType)); + } + } + + return possiblePaths; + } + + if (process.env["ProgramFiles(x86)"]) { + const parentPath = path.join(process.env["ProgramFiles(x86)"], 'Microsoft Visual Studio'); + potentialMsvsPaths.push(...computePossiblePaths(parentPath)); + } + + if 
(process.env.ProgramFiles) { + const parentPath = path.join(process.env.ProgramFiles, 'Microsoft Visual Studio'); + potentialMsvsPaths.push(...computePossiblePaths(parentPath)); + } + + for (const potentialPath of potentialMsvsPaths) { + const wholePath = path.join(potentialPath, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat'); + console.log("checking", wholePath); + if (await exists(wholePath)) { + vcvarsallPath = wholePath; + break; + } + } + + if (!vcvarsallPath) { + throw new Error(`vcvarsall.bat not found`); + } + } + + let vcTarget; + switch (targetArch) { + case "x64": + vcTarget = "VC-WIN64A"; + break; + + case "x86": + vcTarget = "VC-WIN32"; + break; + + case "arm64": + vcTarget = "VC-WIN64-ARM"; + break; + } + + let vsBuildArch = hostArch === targetArch + ? hostArch + : `${hostArch}_${targetArch}`; + + console.log("Using vcvarsall.bat at: ", vcvarsallPath); + console.log("Using vsBuildArch: ", vsBuildArch); + console.log("Using vcTarget: ", vcTarget); + + await new Promise((resolve, reject) => { + const buildProcess = spawn(`"${win32BatPath}" "${vcvarsallPath}" ${vsBuildArch} ${vcTarget}`, { + cwd: buildCwd, + shell: process.platform === "win32", + env: { + ...process.env, + NODEGIT_SKIP_TESTS: targetArch !== hostArch ? 
"1" : undefined + } + }); + + buildProcess.stdout.on("data", function(data) { + console.info(data.toString().trim()); + }); + + buildProcess.stderr.on("data", function(data) { + console.error(data.toString().trim()); + }); + + buildProcess.on("close", function(code) { + if (!code) { + resolve(); + } else { + reject(code); + } + }); + }); + + +}; + +const removeOpenSSLIfOudated = async (openSSLVersion) => { + try { + let openSSLResult; + try { + const openSSLPath = path.join(extractPath, "bin", "openssl"); + openSSLResult = await execPromise(`${openSSLPath} version`); + } catch { + /* if we fail to get the version, assume removal not required */ + } + + if (!openSSLResult) { + return; + } + + const versionMatch = openSSLResult.match(/^OpenSSL (\d\.\d\.\d[a-z]*)/); + const installedVersion = versionMatch && versionMatch[1]; + if (!installedVersion || installedVersion === openSSLVersion) { + return; + } + + console.log("Removing outdated OpenSSL at: ", extractPath); + await fs.rm(extractPath, { recursive: true, force: true }); + console.log("Outdated OpenSSL removed."); + } catch (err) { + console.log("Remove outdated OpenSSL failed: ", err); + } +}; + +const makeOnStreamDownloadProgress = () => { + let lastReport = performance.now(); + return ({ percent, transferred, total }) => { + const currentTime = performance.now(); + if (currentTime - lastReport > 1 * 1000) { + lastReport = currentTime; + console.log(`progress: ${transferred}/${total} (${(percent * 100).toFixed(2)}%)`) + } + }; +}; + +const buildOpenSSLIfNecessary = async ({ + macOsDeploymentTarget, + openSSLVersion +}) => { + if (process.platform !== "darwin" && process.platform !== "win32" && process.platform !== "linux") { + console.log(`Skipping OpenSSL build, not required on ${process.platform}`); + return; + } + + await removeOpenSSLIfOudated(openSSLVersion); + + try { + await fs.stat(extractPath); + console.log("Skipping OpenSSL build, dir exists"); + return; + } catch {} + + const openSSLUrl = 
getOpenSSLSourceUrl(openSSLVersion); + const openSSLSha256Url = getOpenSSLSourceSha256Url(openSSLVersion); + + const openSSLSha256 = (await got(openSSLSha256Url)).body.trim().split(' ')[0]; + + const downloadStream = got.stream(openSSLUrl); + downloadStream.on("downloadProgress", makeOnStreamDownloadProgress()); + + await pipeline( + downloadStream, + new HashVerify("sha256", makeHashVerifyOnFinal(openSSLSha256)), + zlib.createGunzip(), + tar.extract(extractPath) + ); + + console.log(`OpenSSL ${openSSLVersion} download + extract complete: SHA256 OK.`); + + const buildCwd = path.join(extractPath, `openssl-${openSSLVersion}`); + + if (process.platform === "darwin") { + await buildDarwin(buildCwd, macOsDeploymentTarget); + } else if (process.platform === "linux") { + await buildLinux(buildCwd); + } else if (process.platform === "win32") { + await buildWin32(buildCwd); + } else { + throw new Error(`Unknown platform: ${process.platform}`); + } + + console.log("Build finished."); +} + +const downloadOpenSSLIfNecessary = async ({ + downloadBinUrl, + maybeDownloadSha256, + maybeDownloadSha256Url +}) => { + if (process.platform !== "darwin" && process.platform !== "win32" && process.platform !== "linux") { + console.log(`Skipping OpenSSL download, not required on ${process.platform}`); + return; + } + + try { + await fs.stat(extractPath); + console.log("Skipping OpenSSL download, dir exists"); + return; + } catch {} + + if (maybeDownloadSha256Url) { + maybeDownloadSha256 = (await got(maybeDownloadSha256Url)).body.trim(); + } + + const downloadStream = got.stream(downloadBinUrl); + downloadStream.on("downloadProgress", makeOnStreamDownloadProgress()); + + const pipelineSteps = [ + downloadStream, + maybeDownloadSha256 + ? 
new HashVerify("sha256", makeHashVerifyOnFinal(maybeDownloadSha256)) + : null, + zlib.createGunzip(), + tar.extract(extractPath) + ].filter(step => step !== null); + await pipeline( + ...pipelineSteps + ); + + console.log(`OpenSSL download + extract complete${maybeDownloadSha256 ? ": SHA256 OK." : "."}`); + console.log("Download finished."); +} + +export const getOpenSSLPackageName = () => { + return `openssl-${OPENSSL_VERSION}-${process.platform}-${targetArch}.tar.gz`; +} + +export const getOpenSSLPackagePath = () => path.join(import.meta.dirname, getOpenSSLPackageName()); + +const getOpenSSLPackageUrl = () => { + const hostUrl = new URL(packageJson.binary.host); + hostUrl.pathname = getOpenSSLPackageName(); + return hostUrl.toString(); +}; + +const buildPackage = async () => { + let resolve, reject; + const promise = new Promise((_resolve, _reject) => { + resolve = _resolve; + reject = _reject; + }); + await pipeline( + tar.pack(extractPath, { + entries: pathsToIncludeForPackage, + ignore: (name) => { + // Ignore pkgconfig files + return path.extname(name) === ".pc" + || path.basename(name) === "pkgconfig"; + }, + dmode: 0o0755, + fmode: 0o0644 + }), + zlib.createGzip(), + new HashVerify("sha256", (digest) => { + resolve(digest); + }), + createWriteStream(getOpenSSLPackagePath()) + ); + const digest = await promise; + await fs.writeFile(`${getOpenSSLPackagePath()}.sha256`, digest); +}; + +const acquireOpenSSL = async () => { + try { + const downloadBinUrl = process.env.npm_config_openssl_bin_url + || (['win32', 'darwin'].includes(process.platform) ? 
getOpenSSLPackageUrl() : undefined); + if (downloadBinUrl && downloadBinUrl !== 'skip' && !process.env.NODEGIT_OPENSSL_BUILD_PACKAGE) { + const downloadOptions = { downloadBinUrl }; + if (process.env.npm_config_openssl_bin_sha256 !== 'skip') { + if (process.env.npm_config_openssl_bin_sha256) { + downloadOptions.maybeDownloadSha256 = process.env.npm_config_openssl_bin_sha256; + } else { + downloadOptions.maybeDownloadSha256Url = `${getOpenSSLPackageUrl()}.sha256`; + } + } + + await downloadOpenSSLIfNecessary(downloadOptions); + return; + } + + let macOsDeploymentTarget; + if (process.platform === "darwin") { + macOsDeploymentTarget = process.argv[2] ?? process.env.OPENSSL_MACOS_DEPLOYMENT_TARGET + if (!macOsDeploymentTarget || !macOsDeploymentTarget.match(/\d+\.\d+/)) { + throw new Error(`Invalid macOsDeploymentTarget: ${macOsDeploymentTarget}`); + } + } + + await buildOpenSSLIfNecessary({ + openSSLVersion: OPENSSL_VERSION, + macOsDeploymentTarget + }); + if (process.env.NODEGIT_OPENSSL_BUILD_PACKAGE) { + await buildPackage(); + } + } catch (err) { + console.error("Acquire failed: ", err); + process.exit(1); + } +}; + +if (process.argv[1] === import.meta.filename) { + try { + await acquireOpenSSL(); + } + catch(error) { + console.error("Acquire OpenSSL failed: ", error); + process.exit(1); + } +} diff --git a/utils/build-openssl.bat b/utils/build-openssl.bat new file mode 100644 index 0000000000..af8063d7c4 --- /dev/null +++ b/utils/build-openssl.bat @@ -0,0 +1,22 @@ +rem Build OpenSSL for Windows +rem %1 - path to vcvarsall.bat +rem %2 - architecture argument for vcvarsall.bat +rem %3 - OpenSSL Configure target + +@call %1 %2 + +perl .\Configure %3 no-shared no-ssl2 no-ssl3 no-comp --prefix="%cd%\.." --openssldir="%cd%\.." || goto :error + +nmake || goto :error + +if "%NODEGIT_SKIP_TESTS%" NEQ "1" ( + nmake test || goto :error +) + +nmake install || goto :error + +goto :EOF + +:error +echo Failed with error #%errorlevel%. 
+exit /b %errorlevel% \ No newline at end of file diff --git a/utils/buildFlags.js b/utils/buildFlags.js index 3c3d9d9b21..7ea87428ba 100644 --- a/utils/buildFlags.js +++ b/utils/buildFlags.js @@ -10,7 +10,29 @@ try { isGitRepo = false; } +const convertArch = (archStr) => { + const convertedArch = { + 'ia32': 'x86', + 'x86': 'x86', + 'x64': 'x64', + 'arm64': 'arm64' + }[archStr]; + + if (!convertedArch) { + throw new Error('unsupported architecture'); + } + + return convertedArch; +} + +const hostArch = convertArch(process.arch); +const targetArch = process.env.npm_config_arch + ? convertArch(process.env.npm_config_arch) + : hostArch; + module.exports = { + hostArch, + targetArch, debugBuild: !!process.env.BUILD_DEBUG, isElectron: process.env.npm_config_runtime === "electron", isGitRepo: isGitRepo, diff --git a/utils/configureLibssh2.js b/utils/configureLibssh2.js index 3ee8cd9330..95fd5d3649 100644 --- a/utils/configureLibssh2.js +++ b/utils/configureLibssh2.js @@ -1,7 +1,10 @@ var cp = require("child_process"); -var fse = require('fs-extra'); +var fse = require("fs-extra"); var path = require("path"); +const { hostArch, targetArch } = require("./buildFlags"); + +const opensslVendorDirectory = path.resolve(__dirname, "..", "vendor", "openssl"); const libssh2VendorDirectory = path.resolve(__dirname, "..", "vendor", "libssh2"); const libssh2ConfigureScript = path.join(libssh2VendorDirectory, "configure"); const libssh2StaticConfigDirectory = path.resolve(__dirname, "..", "vendor", "static_config", "libssh2"); @@ -18,31 +21,21 @@ module.exports = function retrieveExternalDependencies() { } // Run the `configure` script on Linux - return new Promise(function(resolve, reject) { - - var opensslDir = process.argv[2]; - var isElectron = process.argv[3] === "1"; - var opensslIncludes = isElectron ? 
path.join(opensslDir, "includes") : opensslDir; + let cpArgs = ` --with-libssl-prefix=${opensslVendorDirectory}`; - var newEnv = {}; - Object.keys(process.env).forEach(function(key) { - newEnv[key] = process.env[key]; - }); + const archConfigMap = { + 'x64': 'x86_64-linux-gnu', + 'arm64': 'aarch64-linux-gnu' + }; - newEnv.CPPFLAGS = newEnv.CPPFLAGS || ""; - newEnv.CPPFLAGS += ` -I${opensslIncludes}`; - newEnv.CPPFLAGS = newEnv.CPPFLAGS.trim(); - - var maybeLibsslPrefix = ""; - if (isElectron) { - maybeLibsslPrefix = ` --with-libssl-prefix=${opensslDir}`; - } + cpArgs += ` --build=${archConfigMap[hostArch]}`; + cpArgs += ` --host=${archConfigMap[targetArch]}`; + return new Promise(function(resolve, reject) { cp.exec( - libssh2ConfigureScript + maybeLibsslPrefix, + `${libssh2ConfigureScript}${cpArgs}`, { - cwd: libssh2VendorDirectory, - env: newEnv + cwd: libssh2VendorDirectory }, function(err, stdout, stderr) { if (err) { diff --git a/utils/defaultCxxStandard.js b/utils/defaultCxxStandard.js new file mode 100644 index 0000000000..5a7d7beb5a --- /dev/null +++ b/utils/defaultCxxStandard.js @@ -0,0 +1,24 @@ +const targetSpecified = process.argv[2] !== 'none'; + +let cxxStandard = '14'; + +if (targetSpecified) { + // Assume electron if target is specified. + // If building node 18 / 19 via target, will need to specify C++ standard manually + const majorVersion = process.argv[2].split('.')[0]; + if (Number.parseInt(majorVersion) >= 32) { + cxxStandard = '20'; + } else if (Number.parseInt(majorVersion) >= 21) { + cxxStandard = '17'; + } +} else { + const abiVersion = Number.parseInt(process.versions.modules) ?? 
0; + // Node 18 === 108 + if (abiVersion >= 131) { + cxxStandard = '20'; + } else if (abiVersion >= 108) { + cxxStandard = '17'; + } +} + +process.stdout.write(cxxStandard); diff --git a/utils/discoverOpenSSLDistros.js b/utils/discoverOpenSSLDistros.js deleted file mode 100644 index 5a413a8e63..0000000000 --- a/utils/discoverOpenSSLDistros.js +++ /dev/null @@ -1,184 +0,0 @@ -const cheerio = require("cheerio"); -const fse = require("fs-extra"); -const path = require("path"); -const R = require("ramda"); -const request = require("request-promise-native"); - -const windowsCommonConditions = [ - R.test(/^\s*os=Windows$/gm), - R.test(/^\s*shared=False$/gm) -]; - -const macCommonConditions = [ - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*os=Macos$/gm), - R.test(/^\s*compiler=apple-clang$/gm), - R.test(/^\s*shared=False$/gm) -]; - -const debugPairs = R.toPairs({ - "win32-vs12-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=12$/gm) - ]), - "win32-vs14-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=14$/gm) - ]), - "win32-vs15-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=15$/gm) - ]), - - "win64-vs12-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=12$/gm) - ]), - "win64-vs14-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=14$/gm) - 
]), - "win64-vs15-static-debug": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.runtime=MTd$/gm), - R.test(/^\s*compiler\.version=15$/gm) - ]), - - "macOS-clang-9-static-debug": R.allPass([ - ...macCommonConditions, - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.version=9.0$/gm) - ]), - "macOS-clang-8.1-static-debug": R.allPass([ - ...macCommonConditions, - R.test(/^\s*build_type=Debug$/gm), - R.test(/^\s*compiler\.version=8\.1$/gm) - ]) -}); - -const releasePairs = R.toPairs({ - "win32-vs12-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=12$/gm) - ]), - "win32-vs14-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=14$/gm) - ]), - "win32-vs15-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=15$/gm) - ]), - - "win64-vs12-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=12$/gm) - ]), - "win64-vs14-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=14$/gm) - ]), - "win64-vs15-static-release": R.allPass([ - ...windowsCommonConditions, - R.test(/^\s*arch=x86_64$/gm), - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.runtime=MT$/gm), - R.test(/^\s*compiler\.version=15$/gm) - ]), - - "macOS-clang-9-static-release": 
R.allPass([ - ...macCommonConditions, - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.version=9.0$/gm) - ]), - "macOS-clang-8.1-static-release": R.allPass([ - ...macCommonConditions, - R.test(/^\s*build_type=Release$/gm), - R.test(/^\s*compiler\.version=8\.1$/gm) - ]) -}); - -const distributionPairs = [...debugPairs, ...releasePairs]; - -const getDistributionConfigURLFromHash = itemHash => - `https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/${itemHash}/conaninfo.txt`; - -const getDistributionDownloadURLFromHash = itemHash => - `https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/${itemHash}/conan_package.tgz`; - -const getDistributionsRootURL = () => - "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/"; - -const detectDistributionPairFromConfig = (itemHash, body) => R.pipe( - R.find(([_, predicate]) => predicate(body)), - (distributionPair) => distributionPair - ? [distributionPair[0], getDistributionDownloadURLFromHash(itemHash)] - : undefined -)(distributionPairs); - -const getDistributionConfig = (itemHash) => - request.get(getDistributionConfigURLFromHash(itemHash)) - .then((body) => detectDistributionPairFromConfig(itemHash, body)); - -const discoverDistributions = (treeHtml) => { - const releaseHashes = []; - - const $ = cheerio.load(treeHtml); - $("a").each((_, link) => { - const linkText = link.children[0].data; - if (!linkText) { - return; - } - // Trim off the trailing '/' - const releaseHash = linkText.substring(0, linkText.length - 1); - releaseHashes.push(releaseHash); - }); - - return Promise.all( - R.map(releaseHash => getDistributionConfig(releaseHash), releaseHashes) - ); -} - -const writeFile = (distributions) => - fse.ensureDir(path.dirname(outputPath)) - .then(fse.writeFile(outputPath, JSON.stringify(distributions, null, 2))); - -const outputPath = path.resolve(__dirname, "..", "vendor", "static_config", "openssl_distributions.json"); 
-request(getDistributionsRootURL()) - .then(discoverDistributions) - .then(R.filter(R.identity)) - .then(R.sortBy(R.prop(0))) - .then(R.fromPairs) - .then(writeFile); diff --git a/utils/execPromise.js b/utils/execPromise.js index c186a12bcc..acdc785ec6 100644 --- a/utils/execPromise.js +++ b/utils/execPromise.js @@ -3,9 +3,9 @@ var cp = require('child_process'); // We have to manually promisify this because at this is required in lifecycle // methods and we are not guaranteed that any 3rd party packages are installed // at this point -module.exports = function(command, opts) { +module.exports = function(command, opts, extraOpts = {}) { return new Promise(function(resolve, reject) { - return cp.exec(command, opts, function(err, result) { + const childProcess = cp.exec(command, opts, function(err, result) { if (err) { reject(err); } @@ -13,5 +13,12 @@ module.exports = function(command, opts) { resolve(result); } }); + + if (extraOpts.pipeOutput) { + childProcess.stdout.pipe(process.stdout); + childProcess.stderr.pipe(process.stderr); + } + + return childProcess; }); }; diff --git a/utils/getElectronOpenSSLRoot.js b/utils/getElectronOpenSSLRoot.js new file mode 100644 index 0000000000..a8ccc09b7b --- /dev/null +++ b/utils/getElectronOpenSSLRoot.js @@ -0,0 +1,10 @@ +const path = require("path"); + +if (process.argv.length < 3) { + process.exit(1); +} + +const [, , moduleRootDir] = process.argv; + +const openSSLRoot = process.env.npm_config_openssl_dir || path.join(moduleRootDir, 'vendor', 'openssl'); +process.stdout.write(openSSLRoot); diff --git a/utils/isBuildingForElectron.js b/utils/isBuildingForElectron.js new file mode 100644 index 0000000000..295f6ab1fb --- /dev/null +++ b/utils/isBuildingForElectron.js @@ -0,0 +1,30 @@ +const fs = require("fs") +const JSON5 = require("json5"); +const path = require("path"); + +if (process.argv.length < 3) { + process.exit(1); +} + +const last = arr => arr[arr.length - 1]; +const [, , nodeRootDir] = process.argv; + +let 
isElectron = last(nodeRootDir.split(path.sep)).startsWith("iojs"); + +if (!isElectron) { + try { + // Not ideal, would love it if there were a full featured gyp package to do this operation instead. + const { variables: { built_with_electron } } = JSON5.parse( + fs.readFileSync( + path.resolve(nodeRootDir, "include", "node", "config.gypi"), + "utf8" + ) + ); + + if (built_with_electron) { + isElectron = true; + } + } catch (e) {} +} + +process.stdout.write(isElectron ? "1" : "0"); diff --git a/utils/retry.js b/utils/retry.js new file mode 100644 index 0000000000..c7a57fb065 --- /dev/null +++ b/utils/retry.js @@ -0,0 +1,51 @@ +const { spawn } = require('child_process'); + +const [, , cmd, ...args] = process.argv; +if (!cmd) { + process.exit(-1); +} + +const once = (fn) => { + let runOnce = false; + return (...args) => { + if (runOnce) { + return; + } + + runOnce = true; + fn(...args); + } +}; + +const retry = (numRetries = 3) => { + const child = spawn(cmd, args, { + shell: process.platform === 'win32', + stdio: [0, 'pipe', 'pipe'] + }); + + child.setMaxListeners(0); + + child.stdout.setEncoding('utf8'); + child.stderr.setEncoding('utf8'); + + child.stdout.pipe(process.stdout); + child.stderr.pipe(process.stderr); + + const cleanupAndExit = once((error, status) => { + child.kill(); + if (numRetries > 0 && (error || status !== 0)) { + retry(numRetries - 1); + } else if (error) { + console.log(error); + process.exit(-1); + } else { + process.exit(status); + } + }); + const onClose = status => cleanupAndExit(null, status); + + child.on('close', onClose); + child.on('error', cleanupAndExit); +}; + +retry(); diff --git a/utils/uploadOpenSSL.mjs b/utils/uploadOpenSSL.mjs new file mode 100644 index 0000000000..5de760462e --- /dev/null +++ b/utils/uploadOpenSSL.mjs @@ -0,0 +1,32 @@ +import aws from 'aws-sdk'; +import fs from "fs"; +import path from "path"; + +import pkgJson from '../package.json' with { type: "json" }; +import { getOpenSSLPackagePath, getOpenSSLPackageName 
} from './acquireOpenSSL.mjs'; + +const s3 = new aws.S3(); + +const uploadBinaryToS3 = (fileName, bucketName, pathToFile) => + s3.upload({ + Body: fs.createReadStream(pathToFile), + Bucket: bucketName, + Key: fileName, + ACL: "public-read" + }).promise(); + +export const uploadOpenSSL = async () => { + const packageName = path.basename(getOpenSSLPackageName()); + const packagePath = getOpenSSLPackagePath(); + console.log(`Uploading ${packagePath} to s3://${pkgJson.binary.bucket_name}/${packageName}`); + await uploadBinaryToS3(packageName, pkgJson.binary.bucket_name, packagePath); + const sha256PackageName = `${packageName}.sha256`; + await uploadBinaryToS3(sha256PackageName, pkgJson.binary.bucket_name, `${packagePath}.sha256`); +}; + +if (process.argv[1] === import.meta.filename) { + uploadOpenSSL().catch((error) => { + console.error('Push to S3 failed: ', error); + process.exit(1); + }); +} diff --git a/vendor/libgit2 b/vendor/libgit2 index e634ccf4be..2644628edb 160000 --- a/vendor/libgit2 +++ b/vendor/libgit2 @@ -1 +1 @@ -Subproject commit e634ccf4be1ed60b89f0f3582ca8611d47c400d4 +Subproject commit 2644628edb8742338a952d40f5e9549b17480e3a diff --git a/vendor/libgit2.gyp b/vendor/libgit2.gyp index 11904835e3..aff29d76a2 100644 --- a/vendor/libgit2.gyp +++ b/vendor/libgit2.gyp @@ -7,7 +7,10 @@ "library%": "static_library", "openssl_enable_asm%": 0, # only supported with the Visual Studio 2012 (VC11) toolchain. "gcc_version%": 0, - "is_clang%": 0 + "is_electron%": " * Copyright (c) 2006-2007 The Written Word, Inc. * Copyright (c) 2007 Eli Fant - * Copyright (c) 2009-2014 Daniel Stenberg + * Copyright (c) 2009-2021 Daniel Stenberg * Copyright (C) 2008, 2009 Simon Josefsson + * Copyright (c) 2000 Markus Friedl + * Copyright (c) 2015 Microsoft Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, diff --git a/vendor/libssh2/Makefile.OpenSSL.inc b/vendor/libssh2/Makefile.OpenSSL.inc index 76f3e85cad..1e4e8f0bbb 100644 --- a/vendor/libssh2/Makefile.OpenSSL.inc +++ b/vendor/libssh2/Makefile.OpenSSL.inc @@ -1,2 +1,3 @@ CRYPTO_CSOURCES = openssl.c CRYPTO_HHEADERS = openssl.h +CRYPTO_LTLIBS = $(LTLIBSSL) diff --git a/vendor/libssh2/Makefile.WinCNG.inc b/vendor/libssh2/Makefile.WinCNG.inc index c18350eedf..bbcb82bfde 100644 --- a/vendor/libssh2/Makefile.WinCNG.inc +++ b/vendor/libssh2/Makefile.WinCNG.inc @@ -1,2 +1,3 @@ CRYPTO_CSOURCES = wincng.c CRYPTO_HHEADERS = wincng.h +CRYPTO_LTLIBS = $(LTLIBBCRYPT) $(LTLIBCRYPT32) diff --git a/vendor/libssh2/Makefile.am b/vendor/libssh2/Makefile.am index f7451e8143..986441bd68 100644 --- a/vendor/libssh2/Makefile.am +++ b/vendor/libssh2/Makefile.am @@ -43,7 +43,7 @@ os400/libssh2rpg/libssh2_publickey.rpgle \ os400/libssh2rpg/libssh2_sftp.rpgle \ Makefile.os400qc3.inc -EXTRA_DIST = $(WIN32FILES) buildconf $(NETWAREFILES) get_ver.awk \ +EXTRA_DIST = $(WIN32FILES) $(NETWAREFILES) get_ver.awk \ maketgz NMakefile RELEASE-NOTES libssh2.pc.in $(VMSFILES) config.rpath \ CMakeLists.txt cmake $(OS400FILES) @@ -119,7 +119,7 @@ $(DSP): win32/msvcproj.head win32/msvcproj.foot Makefile.am for file in $$sorted_hdrs; do \ echo "# Begin Source File"; \ echo ""; \ - if [ "$$file" == "libssh2_config.h" ]; \ + if [ "$$file" = "libssh2_config.h" ]; \ then \ echo "SOURCE=.\\"$$file; \ else \ @@ -147,3 +147,8 @@ $(VCPROJ): win32/vc8proj.head win32/vc8proj.foot Makefile.am done; \ cat $(srcdir)/vc8proj.foot) | \ awk '{printf("%s\r\n", gensub("\r", "", "g"))}' > $@ ) + +checksrc: + perl src/checksrc.pl -i4 -m79 -ASIZEOFNOPAREN -ASNPRINTF -ACOPYRIGHT \ + -AFOPENMODE -Wsrc/libssh2_config.h src/*.[ch] include/*.h example/*.c \ + tests/*.[ch] diff --git a/vendor/libssh2/Makefile.in b/vendor/libssh2/Makefile.in index d95397cf0b..c60873ab8e 100644 --- a/vendor/libssh2/Makefile.in +++ 
b/vendor/libssh2/Makefile.in @@ -1,7 +1,7 @@ -# Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.16.4 from Makefile.am. # @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2021 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -105,8 +105,7 @@ DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h \ - $(top_builddir)/example/libssh2_config.h +CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h CONFIG_CLEAN_FILES = libssh2.pc CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) @@ -174,7 +173,7 @@ am__recursive_targets = \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ - cscope distdir dist dist-all distcheck + cscope distdir distdir-am dist dist-all distcheck am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is @@ -192,9 +191,6 @@ am__define_uniq_tagged_files = \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -CSCOPE = cscope DIST_SUBDIRS = src tests docs example am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.inc \ $(srcdir)/libssh2.pc.in COPYING ChangeLog NEWS README compile \ @@ -238,6 +234,8 @@ am__relativize = \ DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best DIST_TARGETS = dist-gzip +# Exists only to be overridden by the user if desired. +AM_DISTCHECK_DVI_TARGET = dvi distuninstallcheck_listfiles = find . 
-type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' @@ -257,6 +255,12 @@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ @@ -267,13 +271,14 @@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ +ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ -HAVE_LIBMBEDTLS = @HAVE_LIBMBEDTLS@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ HAVE_LIBSSL = @HAVE_LIBSSL@ HAVE_LIBZ = @HAVE_LIBZ@ INSTALL = @INSTALL@ @@ -289,8 +294,8 @@ LIBCRYPT32 = @LIBCRYPT32@ LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ LIBGCRYPT = @LIBGCRYPT@ LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ -LIBMBEDTLS = @LIBMBEDTLS@ -LIBMBEDTLS_PREFIX = @LIBMBEDTLS_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSREQUIRED = @LIBSREQUIRED@ @@ -300,12 +305,13 @@ LIBSSL_PREFIX = @LIBSSL_PREFIX@ LIBTOOL = @LIBTOOL@ LIBZ = @LIBZ@ LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = @LIB_FUZZING_ENGINE@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBBCRYPT = @LTLIBBCRYPT@ LTLIBCRYPT32 = @LTLIBCRYPT32@ LTLIBGCRYPT = @LTLIBGCRYPT@ -LTLIBMBEDTLS = @LTLIBMBEDTLS@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ LTLIBOBJS = @LTLIBOBJS@ LTLIBSSL = @LTLIBSSL@ LTLIBZ = @LTLIBZ@ @@ -341,6 +347,7 @@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ @@ -425,7 +432,7 @@ os400/libssh2rpg/libssh2_publickey.rpgle \ os400/libssh2rpg/libssh2_sftp.rpgle \ Makefile.os400qc3.inc -EXTRA_DIST = $(WIN32FILES) 
buildconf $(NETWAREFILES) get_ver.awk \ +EXTRA_DIST = $(WIN32FILES) $(NETWAREFILES) get_ver.awk \ maketgz NMakefile RELEASE-NOTES libssh2.pc.in $(VMSFILES) config.rpath \ CMakeLists.txt cmake $(OS400FILES) @@ -437,10 +444,11 @@ CRYPTO_CSOURCES = openssl.c wincng.c mbedtls.c CRYPTO_HHEADERS = openssl.h wincng.h mbedtls.h CSOURCES = channel.c comp.c crypt.c hostkey.c kex.c mac.c misc.c \ packet.c publickey.c scp.c session.c sftp.c userauth.c transport.c \ - version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c + version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c \ + blowfish.c bcrypt_pbkdf.c agent_win.c HHEADERS = libssh2_priv.h $(CRYPTO_HHEADERS) transport.h channel.h comp.h \ - mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h + mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h blf.h agent.h # Makefile.inc provides the CSOURCES and HHEADERS defines WIN32SOURCES = $(CSOURCES) @@ -469,8 +477,8 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ esac; $(srcdir)/Makefile.inc $(am__empty): @@ -641,8 +649,10 @@ cscopelist-am: $(am__tagged_files) distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags -rm -f cscope.out cscope.in.out cscope.po.out cscope.files +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am -distdir: $(DISTFILES) +distdir-am: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ @@ -710,7 +720,7 @@ distdir: $(DISTFILES) ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir @@ -725,6 +735,10 @@ dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) +dist-zstd: distdir + tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst + $(am__post_remove_distdir) + dist-tarZ: distdir @echo WARNING: "Support for distribution archives compressed with" \ "legacy program 'compress' is deprecated." >&2 @@ -736,7 +750,7 @@ dist-shar: distdir @echo WARNING: "Support for shar distribution archives is" \ "deprecated." >&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 - shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir @@ -754,7 +768,7 @@ dist dist-all: distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ - GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ @@ -764,9 +778,11 @@ distcheck: dist *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ - GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ + *.tar.zst*) \ + zstd -dc $(distdir).tar.zst | $(am__untar) ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) @@ -782,7 +798,7 @@ distcheck: dist $(DISTCHECK_CONFIGURE_FLAGS) \ --srcdir=../.. 
--prefix="$$dc_install_base" \ && $(MAKE) $(AM_MAKEFLAGS) \ - && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ @@ -948,7 +964,7 @@ uninstall-am: uninstall-includeHEADERS uninstall-pkgconfigDATA am--refresh check check-am clean clean-cscope clean-generic \ clean-libtool cscope cscopelist-am ctags ctags-am dist \ dist-all dist-bzip2 dist-gzip dist-hook dist-lzip dist-shar \ - dist-tarZ dist-xz dist-zip distcheck distclean \ + dist-tarZ dist-xz dist-zip dist-zstd distcheck distclean \ distclean-generic distclean-libtool distclean-tags \ distcleancheck distdir distuninstallcheck dvi dvi-am html \ html-am info info-am install install-am install-data \ @@ -1024,7 +1040,7 @@ $(DSP): win32/msvcproj.head win32/msvcproj.foot Makefile.am for file in $$sorted_hdrs; do \ echo "# Begin Source File"; \ echo ""; \ - if [ "$$file" == "libssh2_config.h" ]; \ + if [ "$$file" = "libssh2_config.h" ]; \ then \ echo "SOURCE=.\\"$$file; \ else \ @@ -1053,6 +1069,11 @@ $(VCPROJ): win32/vc8proj.head win32/vc8proj.foot Makefile.am cat $(srcdir)/vc8proj.foot) | \ awk '{printf("%s\r\n", gensub("\r", "", "g"))}' > $@ ) +checksrc: + perl src/checksrc.pl -i4 -m79 -ASIZEOFNOPAREN -ASNPRINTF -ACOPYRIGHT \ + -AFOPENMODE -Wsrc/libssh2_config.h src/*.[ch] include/*.h example/*.c \ + tests/*.[ch] + # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: diff --git a/vendor/libssh2/Makefile.inc b/vendor/libssh2/Makefile.inc index 8f2e570cb5..20d2ebeeb2 100644 --- a/vendor/libssh2/Makefile.inc +++ b/vendor/libssh2/Makefile.inc @@ -1,6 +1,7 @@ CSOURCES = channel.c comp.c crypt.c hostkey.c kex.c mac.c misc.c \ packet.c publickey.c scp.c session.c sftp.c userauth.c transport.c \ - version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c + version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c \ + blowfish.c bcrypt_pbkdf.c agent_win.c HHEADERS = libssh2_priv.h $(CRYPTO_HHEADERS) transport.h channel.h comp.h \ - mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h + mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h blf.h agent.h diff --git a/vendor/libssh2/Makefile.libgcrypt.inc b/vendor/libssh2/Makefile.libgcrypt.inc index 5d56292ce8..0a3aae9aad 100644 --- a/vendor/libssh2/Makefile.libgcrypt.inc +++ b/vendor/libssh2/Makefile.libgcrypt.inc @@ -1,2 +1,3 @@ CRYPTO_CSOURCES = libgcrypt.c CRYPTO_HHEADERS = libgcrypt.h +CRYPTO_LTLIBS = $(LTLIBGCRYPT) diff --git a/vendor/libssh2/Makefile.mbedTLS.inc b/vendor/libssh2/Makefile.mbedTLS.inc index 7e9786429d..b9f19fce1a 100644 --- a/vendor/libssh2/Makefile.mbedTLS.inc +++ b/vendor/libssh2/Makefile.mbedTLS.inc @@ -1,2 +1,3 @@ CRYPTO_CSOURCES = mbedtls.c CRYPTO_HHEADERS = mbedtls.h +CRYPTO_LTLIBS = $(LTLIBMBEDCRYPTO) diff --git a/vendor/libssh2/NEWS b/vendor/libssh2/NEWS index e3caaece5e..7e22b3dd85 100644 --- a/vendor/libssh2/NEWS +++ b/vendor/libssh2/NEWS @@ -1,5491 +1,6831 @@ Changelog for the libssh2 project. 
Generated with git2news.pl -Version 1.8.0 (25 Oct 2016) - -Daniel Stenberg (25 Oct 2016) -- RELEASE-NOTES: adjusted for 1.8.0 +Daniel Stenberg (29 Aug 2021) +- [Will Cosgrove brought this change] -Kamil Dudka (20 Oct 2016) -- Revert "aes: the init function fails when OpenSSL has AES support" - - This partially reverts commit f4f2298ef3635acd031cc2ee0e71026cdcda5864 - because it caused the compatibility code to call initialization routines - redundantly, leading to memory leakage with OpenSSL 1.1 and broken curl - test-suite in Fedora: - - 88 bytes in 1 blocks are definitely lost in loss record 5 of 8 - at 0x4C2DB8D: malloc (vg_replace_malloc.c:299) - by 0x72C607D: CRYPTO_zalloc (mem.c:100) - by 0x72A2480: EVP_CIPHER_meth_new (cmeth_lib.c:18) - by 0x4E5A550: make_ctr_evp.isra.0 (openssl.c:407) - by 0x4E5A8E8: _libssh2_init_aes_ctr (openssl.c:471) - by 0x4E5BB5A: libssh2_init (global.c:49) + updated docs for 1.10.0 release -Daniel Stenberg (19 Oct 2016) -- [Charles Collicutt brought this change] +Marc Hörsken (30 May 2021) +- [Laurent Stacul brought this change] - libssh2_wait_socket: Fix comparison with api_timeout to use milliseconds (#134) + [tests] Try several times to connect the ssh server - Fixes #74 + Sometimes, as the OCI container is run in detached mode, it is possible + the actual server is not ready yet to handle SSH traffic. The goal of + this PR is to try several times (max 3). The mechanism is the same as + for the connection to the docker machine. -- [Charles Collicutt brought this change] +- [Laurent Stacul brought this change] - Set err_msg on _libssh2_wait_socket errors (#135) + Remove openssh_server container on test exit -- Revert "travis: Test mbedtls too" +- [Laurent Stacul brought this change] + + Allow the tests to run inside a container - This reverts commit 3e6de50a24815e72ec5597947f1831f6083b7da8. + The current tests suite starts SSH server as OCI container. 
This commit + add the possibility to run the tests in a container provided that: - Travis doesn't seem to support the mbedtls-dev package + * the docker client is installed builder container + * the host docker daemon unix socket has been mounted in the builder + container (with, if needed, the DOCKER_HOST environment variable + accordingly set, and the permission to write on this socket) + * the builder container is run on the default bridge network, or the + host network. This PR does not handle the case where the builder + container is on another network. -- maketgz: support "only" to only update version number locally +Marc Hoersken (28 May 2021) +- CI/appveyor: run SSH server for tests on GitHub Actions (#607) - and fix the date output locale - -- configure: make the --with-* options override the OpenSSL default + No longer rely on DigitalOcean to host the Docker container. - ... previously it would default to OpenSSL even with the --with-[crypto] - options used unless you specificly disabled OpenSSL. Now, enabling another - backend will automatically disable OpenSSL if the other one is found. - -- [Keno Fischer brought this change] - - docs: Add documentation on new cmake/configure options - -- [Keno Fischer brought this change] - - configure: Add support for building with mbedtls - -- [wildart brought this change] - - travis: Test mbedtls too + Unfortunately we require a small dispatcher script that has + access to a GitHub access token with scope repo in order to + trigger the daemon workflow on GitHub Actions also for PRs. + + This script is hosted by myself for the time being until GitHub + provides a tighter scope to trigger the workflow_dispatch event. 
-- [wildart brought this change] +GitHub (26 May 2021) +- [Will Cosgrove brought this change] - crypto: add support for the mbedTLS backend + openssl.c: guards around calling FIPS_mode() #596 (#603) - Closes #132 - -- [wildart brought this change] + Notes: + FIPS_mode() is not implemented in LibreSSL and this API is removed in OpenSSL 3.0 and was introduced in 0.9.7. Added guards around making this call. + + Credit: + Will Cosgrove - cmake: Add CLEAR_MEMORY option, analogously to that for autoconf +- [Will Cosgrove brought this change] -- README.md: fix link typo + configure.ac: don't undefine scoped variable (#594) + + * configure.ac: don't undefine scoped variable + + To get this script to run with Autoconf 2.71 on macOS I had to remove the undefine of the backend for loop variable. It seems scoped to the for loop and also isn't referenced later in the script so it seems OK to remove it. + + * configure.ac: remove cygwin specific CFLAGS #598 + + Notes: + Remove cygwin specific Win32 CFLAGS and treat the build like a posix build + + Credit: + Will Cosgrove, Brian Inglis -- README: markdown version to look nicer on github +- [Laurent Stacul brought this change] -Viktor Szakats (5 Sep 2016) -- [Taylor Holberton brought this change] + tests: Makefile.am: Add missing tests client keys in distribution tarball (#604) + + Notes: + Added missing test keys. 
+ + Credit: + Laurent Stacul - openssl: add OpenSSL 1.1.0 compatibility +- [Laurent Stacul brought this change] -Daniel Stenberg (4 Sep 2016) -- [Antenore Gatta brought this change] + Makefile.am: Add missing test keys in the distribution tarball (#601) + + Notes: + Fix tests missing key to build the OCI image + + Credit: + Laurent Stacul - tests: HAVE_NETINET_IN_H was not defined correctly (#127) +Daniel Stenberg (16 May 2021) +- dist: add src/agent.h - Fixes #125 + Fixes #597 + Closes #599 -- SECURITY: fix web site typo +GitHub (12 May 2021) +- [Will Cosgrove brought this change] -- SECURITY: security process + packet.c: Reset read timeout after received a packet (#576) (#586) + + File: + packet.c + + Notes: + Attempt keyboard interactive login (Azure AD 2FA login) and use more than 60 seconds to complete the login, the connection fails. + + The _libssh2_packet_require function does almost the same as _libssh2_packet_requirev but this function sets state->start = 0 before returning. + + Credit: + teottin, Co-authored-by: Tor Erik Ottinsen -GitHub (14 Aug 2016) -- [Alexander Lamaison brought this change] +- [kkoenig brought this change] - Basic dockerised test suite. + Support ECDSA certificate authentication (#570) - This introduces a test suite for libssh2. It runs OpenSSH in a Docker - container because that works well on Windows (via docker-machine) as - well as Linux. Presumably it works on Mac too with docker-machine, but - I've not tested that. + Files: hostkey.c, userauth.c, test_public_key_auth_succeeds_with_correct_ecdsa_key.c - Because the test suite is docker-machine aware, you can also run it - against a cloud provider, for more realistic network testing, by setting - your cloud provider as your active docker machine. The Appveyor CI setup - in this commit does that because Appveyor doesn't support docker - locally. 
+ Notes: + Support ECDSA certificate authentication + + Add a test for: + - Existing ecdsa basic public key authentication + - ecdsa public key authentication with a signed public key + + Credit: + kkoenig -Kamil Dudka (3 Aug 2016) -- [Viktor Szakats brought this change] +- [Gabriel Smith brought this change] - misc.c: Delete unused static variables + agent.c: Add support for Windows OpenSSH agent (#517) - Closes #114 + Files: agent.c, agent.h, agent_win.c + + Notes: + * agent: Add support for Windows OpenSSH agent + + The implementation was partially taken and modified from that found in + the Portable OpenSSH port to Win32 by the PowerShell team, but mostly + based on the existing Unix OpenSSH agent support. + + https://github.com/PowerShell/openssh-portable + + Regarding the partial transfer support implementation: partial transfers + are easy to deal with, but you need to track additional state when + non-blocking IO enters the picture. A tracker of how many bytes have + been transfered has been placed in the transfer context struct as that's + where it makes most sense. This tracker isn't placed behind a WIN32 + #ifdef as it will probably be useful for other agent implementations. + + * agent: win32 openssh: Disable overlapped IO + + Non-blocking IO is not currently supported by the surrounding agent + code, despite a lot of the code having everything set up to handle it. 
+ + Credit: + Co-authored-by: Gabriel Smith -Daniel Stenberg (9 Apr 2016) -- [Will Cosgrove brought this change] +- [Zenju brought this change] - Merge pull request #103 from willco007/patch-2 + Fix detailed _libssh2_error being overwritten (#473) - Fix for security issue CVE-2016-0787 + Files: openssl.c, pem.c, userauth.c + + Notes: + * Fix detailed _libssh2_error being overwritten by generic errors + * Unified error handling + + Credit: + Zenju -Alexander Lamaison (2 Apr 2016) -- [Zenju brought this change] +- [Paul Capron brought this change] - Fix MSVC 14 compilation errors + Fix _libssh2_random() silently discarding errors (#520) - For _MSC_VER == 1900 these macros are not needed and create problems: + Notes: + * Make _libssh2_random return code consistent + Previously, _libssh2_random was advertized in HACKING.CRYPTO as + returning `void` (and was implemented that way in os400qc3.c), but that + was in other crypto backends a lie; _libssh2_random is (a macro + expanding) to an int-value expression or function. + Moreover, that returned code was: + — 0 or success, -1 on error for the MbedTLS & WinCNG crypto backends + But also: + — 1 on success, -1 or 0 on error for the OpenSSL backend! + – 1 on success, error cannot happen for libgcrypt! - 1>C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt\stdio.h(1925): warning C4005: 'snprintf': macro redefinition (compiling source file libssh2-files\src\mac.c) + This commit makes explicit that _libssh2_random can fail (because most of + the underlying crypto functions can indeed fail!), and it makes its result + code consistent: 0 on success, -1 on error. - 1> \win32\libssh2_config.h(27): note: see previous definition of 'snprintf' (compiling source file libssh2-files\src\mac.c) + This is related to issue #519 https://github.com/libssh2/libssh2/issues/519 + It fixes the first half of it. 
- 1>C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt\stdio.h(1927): fatal error C1189: #error: Macro definition of snprintf conflicts with Standard Library function declaration (compiling source file libssh2-files\src\mac.c) - -Daniel Stenberg (26 Mar 2016) -- [Brad Harder brought this change] - - _libssh2_channel_open: speeling error fixed in channel error message - -Alexander Lamaison (15 Mar 2016) -- Link with crypt32.lib on Windows. + * Don't silent errors of _libssh2_random - Makes linking with static OpenSSL work again. Although it's not - required for dynamic OpenSSL, it does no harm. + Make sure to check the returned code of _libssh2_random(), and + propagates any failure. - Fixes #98. + A new LIBSSH_ERROR_RANDGEN constant is added to libssh2.h + None of the existing error constants seemed fit. + + This commit is related to d74285b68450c0e9ea6d5f8070450837fb1e74a7 + and to https://github.com/libssh2/libssh2/issues/519 (see the issue + for more info.) It closes #519. + + Credit: + Paul Capron -- [Craig A. Berry brought this change] +- [Gabriel Smith brought this change] - Tweak VMS help file building. + ci: Remove caching of docker image layers (#589) - Primarily this is handling cases where top-level files moved into - the docs/ directory. I also corrected a typo and removed the - claim that libssh2 is public domain. + Notes: + continued ci reliability work. + + Credit: + Gabriel Smith -- [Craig A. Berry brought this change] +- [Gabriel Smith brought this change] - Build with standard stat structure on VMS. + ci: Speed up docker builds for tests (#587) - This gets us large file support, is available on any VMS release - in the last decade and more, and gives stat other modern features - such as 64-bit ino_t. + Notes: + The OpenSSH server docker image used for tests is pre-built to prevent + wasting time building it during a test, and unneeded rebuilds are + prevented by caching the image layers. + + Credit: + Gabriel Smith -- [Craig A. 
Berry brought this change] +- [Will Cosgrove brought this change] - Update vms/libssh2_config.h. + userauth.c: don't error if using keys without RSA (#555) - VMS does have stdlib.h, gettimeofday(), and OpenSSL. The latter - is appropriate to hard-wire in the configuration because it's - installed by default as part of the base operating system and - there is currently no libgcrypt port. + file: userauth.c + + notes: libssh2 now supports many other key types besides RSA, if the library is built without RSA support and a user attempts RSA auth it shouldn't be an automatic error + + credit: + Will Cosgrove -- [Craig A. Berry brought this change] +- [Marc brought this change] - VMS can't use %zd for off_t format. + openssl.c: Avoid OpenSSL latent error in FIPS mode (#528) - %z is a C99-ism that VMS doesn't currently have; even though the - compiler is C99-compliant, the library isn't quite. The off_t used - for the st_size element of the stat can be 32-bit or 64-bit, so - detect what we've got and pick a format accordingly. + File: + openssl.c + + Notes: + Avoid initing MD5 digest, which is not permitted in OpenSSL FIPS certified cryptography mode. + + Credit: + Marc -- [Craig A. Berry brought this change] +- [Laurent Stacul brought this change] - Normalize line endings in libssh2_sftp_get_channel.3. + openssl.c: Fix EVP_Cipher interface change in openssl 3 #463 - Somehow it got Windows-style CRLF endings so convert to just LF, - for consistency as well as not to confuse tools that will regard - the \r as content (e.g. the OpenVMS help librarian). - -Dan Fandrich (29 Feb 2016) -- libgcrypt: Fixed a NULL pointer dereference on OOM + File: + openssl.c + + Notes: + Fixes building with OpenSSL 3, #463. 
+ + The change is described there: + https://github.com/openssl/openssl/commit/f7397f0d58ce7ddf4c5366cd1846f16b341fbe43 + + Credit: + Laurent Stacul, reported by Sergei -Daniel Stenberg (24 Feb 2016) -- [Viktor Szakats brought this change] +- [Gabriel Smith brought this change] - url updates, HTTP => HTTPS + openssh_fixture.c: Fix potential overwrite of buffer when reading stdout of command (#580) - Closes #87 - -Dan Fandrich (23 Feb 2016) -- RELEASE-NOTES: removed some duplicated names + File: + openssh_fixture.c + Notes: + If reading the full output from the executed command took multiple + passes (such as when reading multiple lines) the old code would read + into the buffer starting at the some position (the start) every time. + The old code only works if fgets updated p or had an offset parameter, + both of which are not true. + + Credit: + Gabriel Smith -Version 1.7.0 (23 Feb 2016) +- [Gabriel Smith brought this change] -Daniel Stenberg (23 Feb 2016) -- web: the site is now HTTPS + ci: explicitly state the default branch (#585) + + Notes: + It looks like the $default-branch macro only works in templates, not + workflows. This is not explicitly stated anywhere except the linked PR + comment. + + https://github.com/actions/starter-workflows/pull/590#issuecomment-672360634 + + credit: + Gabriel Smith -- RELEASE-NOTES: 1.7.0 release +- [Gabriel Smith brought this change] -- diffie_hellman_sha256: convert bytes to bits + ci: Swap from Travis to Github Actions (#581) - As otherwise we get far too small numbers. + Files: ci files - Reported-by: Andreas Schneider + Notes: + Move Linux CI using Github Actions - CVE-2016-0787 + Credit: + Gabriel Smith, Marc Hörsken -Alexander Lamaison (18 Feb 2016) -- Allow CI failures with VS 2008 x64. +- [Mary brought this change] + + libssh2_priv.h: add iovec on 3ds (#575) - Appveyor doesn't support this combination. 
+ file: libssh2_priv.h + note: include iovec for 3DS + credit: Mary Mstrodl -Daniel Stenberg (16 Feb 2016) -- [Viktor Szakats brought this change] +- [Laurent Stacul brought this change] - GNUmakefile: list system libs after user libs + Tests: Fix unused variables warning (#561) - Otherwise some referenced WinSock functions will fail to - resolve when linking against LibreSSL 2.3.x static libraries - with mingw. + file: test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c - Closes #80 + notes: fixed unused vars + + credit: + Laurent Stacul - [Viktor Szakats brought this change] - openssl: apply new HAVE_OPAQUE_STRUCTS macro + bcrypt_pbkdf.c: fix clang10 false positive warning (#563) - Closes #81 - -- [Viktor Szakats brought this change] + File: bcrypt_pbkdf.c + + Notes: + blf_enc() takes a number of 64-bit blocks to encrypt, but using + sizeof(uint64_t) in the calculation triggers a warning with + clang 10 because the actual data type is uint32_t. Pass + BCRYPT_BLOCKS / 2 for the number of blocks like libc bcrypt(3) + does. + + Ref: https://github.com/openbsd/src/commit/04a2240bd8f465bcae6b595d912af3e2965856de + + Fixes #562 + + Credit: + Viktor Szakats - openssl: fix LibreSSL support after OpenSSL 1.1.0-pre1/2 support +- [Will Cosgrove brought this change] -Alexander Lamaison (14 Feb 2016) -- sftp.h: Fix non-C90 type. + transport.c: release payload on error (#554) - uint64_t does not exist in C90. Use libssh2_uint64_t instead. + file: transport.c + notes: If the payload is invalid and there is an early return, we could leak the payload + credit: + Will Cosgrove -- Exclude sshd tests from AppVeyor. +- [Will Cosgrove brought this change] + + ssh2_client_fuzzer.cc: fixed building - They fail complaining that sshd wasn't invoked with an absolute path. + The GitHub web editor did some funky things -- Test on more versions of Visual Studio. +- [Will Cosgrove brought this change] -- Fix Appveyor builds. 
+ ssh_client_fuzzer.cc: set blocking mode on (#553) + + file: ssh_client_fuzzer.cc + + notes: the session needs blocking mode turned on to avoid EAGAIN being returned from libssh2_session_handshake() + + credit: + Will Cosgrove, reviewed by Michael Buckley -Daniel Stenberg (14 Feb 2016) -- [Viktor Szakats brought this change] +- [Etienne Samson brought this change] - openssl: add OpenSSL 1.1.0-pre3-dev compatibility + Add a LINT option to CMake (#372) - by using API instead of accessing an internal structure. + * ci: make style-checking available locally - Closes #83 + * cmake: add a linting target + + * tests: check test suite syntax with checksrc.pl -- RELEASE-NOTES: synced with 996b04ececdf +- [Will Cosgrove brought this change] -- include/libssh2.h: next version is 1.7.0 + kex.c: kex_agree_instr() improve string reading (#552) + + * kex.c: kex_agree_instr() improve string reading + + file: kex.c + notes: if haystack isn't null terminated we should use memchr() not strchar(). We should also make sure we don't walk off the end of the buffer. + credit: + Will Cosgrove, reviewed by Michael Buckley -- configure: build "silent" if possible +- [Will Cosgrove brought this change] -- sftp: re-indented some minor stuff + kex.c: use string_buf in ecdh_sha2_nistp (#551) + + * kex.c: use string_buf in ecdh_sha2_nistp + + file: kex.c + + notes: + use string_buf in ecdh_sha2_nistp() to avoid attempting to parse malformed data -- [Jakob Egger brought this change] +- [Will Cosgrove brought this change] - sftp.c: ensure minimum read packet size + kex.c: move EC macro outside of if check #549 (#550) - For optimum performance we need to ensure we don't request tiny packets. + File: kex.c + + Notes: + Moved the macro LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY outside of the LIBSSH2_ECDSA since it's also now used by the ED25519 code. + + Sha 256, 384 and 512 need to be defined for all backends now even if they aren't used directly. 
I believe this is already the case, but just a heads up. + + Credit: + Stefan-Ghinea -- [Jakob Egger brought this change] +- [Tim Gates brought this change] - sftp.c: Explicit return values & sanity checks - -- [Jakob Egger brought this change] - - sftp.c: Check Read Packet File Offset + kex.c: fix simple typo, niumber -> number (#545) - This commit adds a simple check to see if the offset of the read - request matches the expected file offset. + File: kex.c - We could try to recover, from this condition at some point in the future. - Right now it is better to return an error instead of corrupted data. - -- [Jakob Egger brought this change] - - sftp.c: Don't return EAGAIN if data was written to buffer - -- [Jakob Egger brought this change] - - sftp.c: Send at least one read request before reading + Notes: + There is a small typo in src/kex.c. - This commit ensures that we have sent at least one read request before - we try to read data in sftp_read(). + Should read `number` rather than `niumber`. - Otherwise sftp_read() would return 0 bytes (indicating EOF) if the - socket is not ready for writing. + Credit: + Tim Gates -- [Jakob Egger brought this change] +- [Tseng Jun brought this change] - sftp.c: stop reading when buffer is full + session.c: Correct a typo which may lead to stack overflow (#533) - Since we can only store data from a single chunk in filep, - we have to stop receiving data as soon as the buffer is full. + File: session.c - This adresses the following bug report: - https://github.com/libssh2/libssh2/issues/50 + Notes: + Seems the author intend to terminate banner_dup buffer, later, print it to the debug console. + + Author: + Tseng Jun -Salvador Fandiño (21 Jan 2016) -- agent_disconnect_unix: unset the agent fd after closing it +Marc Hoersken (10 Oct 2020) +- wincng: fix random big number generation to match openssl - "agent_disconnect_unix", called by "libssh2_agent_disconnect", was - leaving the file descriptor in the agent structure unchanged. 
Later, - "libssh2_agent_free" would call again "libssh2_agent_disconnect" under - the hood and it would try to close again the same file descriptor. In - most cases that resulted in just a harmless error, but it is also - possible that the file descriptor had been reused between the two - calls resulting in the closing of an unrelated file descriptor. + The old function would set the least significant bits in + the most significant byte instead of the most significant bits. - This patch sets agent->fd to LIBSSH2_INVALID_SOCKET avoiding that - issue. + The old function would also zero pad too much bits in the + most significant byte. This lead to a reduction of key space + in the most significant byte according to the following listing: + - 8 bits reduced to 0 bits => eg. 2048 bits to 2040 bits DH key + - 7 bits reduced to 1 bits => eg. 2047 bits to 2041 bits DH key + - 6 bits reduced to 2 bits => eg. 2046 bits to 2042 bits DH key + - 5 bits reduced to 3 bits => eg. 2045 bits to 2043 bits DH key - Signed-off-by: Salvador Fandiño + No change would occur for the case of 4 significant bits. + For 1 to 3 significant bits in the most significant byte + the DH key would actually be expanded instead of reduced: + - 3 bits expanded to 5 bits => eg. 2043 bits to 2045 bits DH key + - 2 bits expanded to 6 bits => eg. 2042 bits to 2046 bits DH key + - 1 bits expanded to 7 bits => eg. 2041 bits to 2047 bits DH key + + There is no case of 0 significant bits in the most significant byte + since this would be a case of 8 significant bits in the next byte. + + At the moment only the following case applies due to a fixed + DH key size value currently being used in libssh2: + + The DH group_order is fixed to 256 (bytes) which leads to a + 2047 bits DH key size by calculating (256 * 8) - 1. 
+ + This means the DH keyspace was previously reduced from 2047 bits + to 2041 bits (while the top and bottom bits are always set), so the + keyspace is actually always reduced from 2045 bits to 2039 bits. + + All of this is only relevant for Windows versions supporting the + WinCNG backend (Vista or newer) before Windows 10 version 1903. + + Closes #521 -Daniel Stenberg (18 Jan 2016) -- [Patrick Monnerat brought this change] +Daniel Stenberg (28 Sep 2020) +- libssh2_session_callback_set.3: explain the recv/send callbacks + + Describe how to actually use these callbacks. + + Closes #518 - os400qc3: support encrypted private keys +GitHub (23 Sep 2020) +- [Will Cosgrove brought this change] + + agent.c: formatting - PKCS#8 EncryptedPrivateKeyinfo structures are recognized and decoded to get - values accepted by the Qc3 crypto library. + Improved formatting of RECV_SEND_ALL macro. -- [Patrick Monnerat brought this change] +- [Will Cosgrove brought this change] - os400qc3: New PKCS#5 decoder + CMakeLists.txt: respect install lib dir #405 (#515) - The Qc3 library is not able to handle PKCS#8 EncryptedPrivateKeyInfo structures - by itself. It is only capable of decrypting the (encrypted) PrivateKeyInfo - part, providing a key encryption key and an encryption algorithm are given. - Since the encryption key and algorithm description part in a PKCS#8 - EncryptedPrivateKeyInfo is a PKCS#5 structure, such a decoder is needed to - get the derived key method and hash, as well as encryption algorith and - initialisation vector. + Files: + CMakeLists.txt + + Notes: + Use CMAKE_INSTALL_LIBDIR directory + + Credit: Arfrever -- [Patrick Monnerat brought this change] +- [Will Cosgrove brought this change] - os400qc3: force continuous update on non-final hash/hmac computation + kex.c: group16-sha512 and group18-sha512 support #457 (#468) + + Files: kex.c + + Notes: + Added key exchange group16-sha512 and group18-sha512. 
As a result did the following: + + Abstracted diffie_hellman_sha256() to diffie_hellman_sha_algo() which is now algorithm agnostic and takes the algorithm as a parameter since we needed sha512 support. Unfortunately it required some helper functions but they are simple. + Deleted diffie_hellman_sha1() + Deleted diffie_hellman_sha1 specific macro + Cleaned up some formatting + Defined sha384 in os400 and wincng backends + Defined LIBSSH2_DH_MAX_MODULUS_BITS to abort the connection if we receive too large of p from the server doing sha1 key exchange. + Reorder the default key exchange list to match OpenSSH and improve security + + Credit: + Will Cosgrove -- [Patrick Monnerat brought this change] +- [Igor Klevanets brought this change] - os400qc3: Be sure hmac keys have a minimum length + agent.c: Recv and send all bytes via network in agent_transact_unix() (#510) - The Qc3 library requires a minimum key length depending on the target - hash algorithm. Append binary zeroes to the given key if not long enough. - This matches RFC 2104 specifications. + Files: agent.c + + Notes: + Handle sending/receiving partial packet replies in agent.c API. + + Credit: Klevanets Igor -- [Patrick Monnerat brought this change] +- [Daniel Stenberg brought this change] - os400qc3: Slave descriptor for key encryption key + Makefile.am: include all test files in the dist #379 - The Qc3 library requires the key encryption key to exist as long as - the encrypted key is used. Its descriptor token is then kept as an - "encrypted key slave" for recursive release. + File: + Makefile.am + + Notes: + No longer conditionally include OpenSSL specific test files, they aren't run if we're not building against OpenSSL 1.1.x anyway. 
+ + Credit: + Daniel Stenberg -- [Patrick Monnerat brought this change] +- [Max Dymond brought this change] - os400qc3.c: comment PEM/DER decoding + Add support for an OSS Fuzzer fuzzing target (#392) + + Files: + .travis.yml, configure.ac, ossfuzz + + Notes: + This adds support for an OSS-Fuzz fuzzing target in ssh2_client_fuzzer, + which is a cut down example of ssh2.c. Future enhancements can improve + coverage. + + Credit: + Max Dymond -- [Patrick Monnerat brought this change] +- [Sebastián Katzer brought this change] - os400qc3.c: improve ASN.1 header byte checks + mbedtls.c: ECDSA support for mbed TLS (#385) + + Files: + mbedtls.c, mbedtls.h, .travis.yml + + Notes: + This PR adds support for ECDSA for both key exchange and host key algorithms. + + The following elliptic curves are supported: + + 256-bit curve defined by FIPS 186-4 and SEC1 + 384-bit curve defined by FIPS 186-4 and SEC1 + 521-bit curve defined by FIPS 186-4 and SEC1 + + Credit: + Sebastián Katzer -- [Patrick Monnerat brought this change] +Marc Hoersken (1 Sep 2020) +- buildconf: exec autoreconf to avoid additional process (#512) + + Also make buildconf exit with the return code of autoreconf. + + Follow up to #224 - os400qc3.c: improve OID matching +- scp.c: fix indentation in shell_quotearg documentation -- [Patrick Monnerat brought this change] +- wincng: make more use of new helper functions (#496) - os400: os400qc3.c: replace malloc by LIBSSH2_ALLOC or alloca where possible +- wincng: make sure algorithm providers are closed once (#496) -- [Patrick Monnerat brought this change] +GitHub (10 Jul 2020) +- [David Benjamin brought this change] - os400: asn1_new_from_bytes(): use data from a single element only + openssl.c: clean up curve25519 code (#499) + + File: openssl.c, openssl.h, crypto.h, kex.c + + Notes: + This cleans up a few things in the curve25519 implementation: + + - There is no need to create X509_PUBKEYs or PKCS8_PRIV_KEY_INFOs to + extract key material. 
EVP_PKEY_get_raw_private_key and + EVP_PKEY_get_raw_public_key work fine. + + - libssh2_x25519_ctx was never used (and occasionally mis-typedefed to + libssh2_ed25519_ctx). Remove it. The _libssh2_curve25519_new and + _libssh2_curve25519_gen_k interfaces use the bytes. Note, if it needs + to be added back, there is no need to roundtrip through + EVP_PKEY_new_raw_private_key. EVP_PKEY_keygen already generated an + EVP_PKEY. + + - Add some missing error checks. + + Credit: + David Benjamin -- [Patrick Monnerat brought this change] +- [Will Cosgrove brought this change] - os400: fix an ILE/RPG prototype + transport.c: socket is disconnected, return error (#500) + + File: transport.c + + Notes: + This is to fix #102, instead of continuing to attempt to read a disconnected socket, it will now error out. + + Credit: + TDi-jonesds -- [Patrick Monnerat brought this change] +- [Will Cosgrove brought this change] - os400: implement character encoding conversion support + stale.yml + + Increasing stale values. -- [Patrick Monnerat brought this change] +Marc Hoersken (6 Jul 2020) +- wincng: try newer DH API first, fallback to legacy RSA API + + Avoid the use of RtlGetVersion or similar Win32 functions, + since these depend on version information from manifests. + + This commit makes the WinCNG backend first try to use the + new DH algorithm API with the raw secret derivation feature. + In case this feature is not available the WinCNG backend + will fallback to the classic approach of using RSA-encrypt + to perform the required modular exponentiation of BigNums. + + The feature availability test is done during the first handshake + and the result is stored in the crypto backends global state. + + Follow up to #397 + Closes #484 - os400: do not miss some external prototypes +- wincng: fix indentation of function arguments and comments - Build procedure extproto() did not strip braces from header files, thus - possibly prepended them to true prototypes. 
This prevented the prototype to - be recognized as such. - The solution implemented here is to map braces to semicolons, effectively - considering them as potential prototype delimiters. + Follow up to #397 -- [Patrick Monnerat brought this change] +- [Wez Furlong brought this change] - os400: Really add specific README + wincng: use newer DH API for Windows 8.1+ + + Since Windows 1903 the approach used to perform DH kex with the CNG + API has been failing. + + This commit switches to using the `DH` algorithm provider to perform + generation of the key pair and derivation of the shared secret. + + It uses a feature of CNG that is not yet documented. The sources of + information that I've found on this are: + + * https://stackoverflow.com/a/56378698/149111 + * https://github.com/wbenny/mini-tor/blob/5d39011e632be8e2b6b1819ee7295e8bd9b7a769/mini/crypto/cng/dh.inl#L355 + + With this change I am able to successfully connect from Windows 10 to my + ubuntu system. + + Refs: https://github.com/alexcrichton/ssh2-rs/issues/122 + Fixes: https://github.com/libssh2/libssh2/issues/388 + Closes: https://github.com/libssh2/libssh2/pull/397 -- [Patrick Monnerat brought this change] +GitHub (1 Jul 2020) +- [Zenju brought this change] - os400: Add specific README and include new files in dist tarball + comp.c: Fix name clash with ZLIB macro "compress" (#418) + + File: comp.c + + Notes: + * Fix name clash with ZLIB macro "compress". + + Credit: + Zenju -- [Patrick Monnerat brought this change] +- [yann-morin-1998 brought this change] - os400: add compilation scripts + buildsystem: drop custom buildconf script, rely on autoreconf (#224) + + Notes: + The buildconf script is currently required, because we need to copy a + header around, because it is used both from the library and the examples + sources. + + However, having a custom 'buildconf'-like script is not needed if we can + ensure that the header exists by the time it is needed. 
For that, we can + just append the src/ directory to the headers search path for the + examples. + + And then it means we no longer need to generate the same header twice, + so we remove the second one from configure.ac. + + Now, we can just call "autoreconf -fi" to generate the autotools files, + instead of relying on the canned sequence in "buildconf", since + autoreconf has now long known what to do at the correct moment (future + versions of autotools, automake, autopoint, autoheader etc... may + require an other ordering, or other intermediate steps, etc...). + + Eventually, get rid of buildconf now it is no longer needed. In fact, we + really keep it for legacy, but have it just call autoreconf (and print a + nice user-friendly warning). Don't include it in the release tarballs, + though. + + Update doc, gitignore, and travis-CI jobs accordingly. + + Credit: + Signed-off-by: "Yann E. MORIN" + Cc: Sam Voss -- [Patrick Monnerat brought this change] +- [Will Cosgrove brought this change] - os400: include files for ILE/RPG + libssh2.h: Update Diffie Hellman group values (#493) - In addition, file os400/macros.h declares all procedures originally - defined as macros. It must not be used for real inclusion and is only - intended to be used as a `database' for macro wrapping procedures generation. + File: libssh2.h + + Notes: + Update the min, preferred and max DH group values based on RFC 8270. + + Credit: + Will Cosgrove, noted from email list by Mitchell Holland -- [Patrick Monnerat brought this change] +Marc Hoersken (22 Jun 2020) +- travis: use existing Makefile target to run checksrc - os400: add supplementary header files/wrappers. Define configuration. 
+- Makefile: also run checksrc on test source files -- [Patrick Monnerat brought this change] +- tests: avoid use of deprecated function _sleep (#490) - Protect callback function calls from macro substitution +- tests: avoid use of banned function strncat (#489) + +- tests: satisfy checksrc regarding max line length of 79 chars - Some structure fields holding callback addresses have the same name as the - underlying system function (connect, send, recv). Set parentheses around - their reference to suppress a possible macro substitution. + Follow up to 2764bc8e06d51876b6796d6080c6ac51e20f3332 + +- tests: satisfy checksrc with whitespace only fixes - Use a macro for connect() on OS/400 to resolve a const/nonconst parameter - problem. + checksrc.pl -i4 -m79 -ASIZEOFNOPAREN -ASNPRINTF + -ACOPYRIGHT -AFOPENMODE tests/*.[ch] -- [Patrick Monnerat brought this change] +- tests: add support for ports published via Docker for Windows - Add interface for OS/400 crypto library QC3 +- tests: restore retry behaviour for docker-machine ip command -- [Patrick Monnerat brought this change] +- tests: fix mix of declarations and code failing C89 compliance - misc: include stdarg.h for debug code +- wincng: add and improve checks in bit counting function -- [Patrick Monnerat brought this change] +- wincng: align bits to bytes calculation in all functions - Document crypto library interface +- wincng: do not disable key validation that can be enabled + + The modular exponentiation also works with key validation enabled. -- [Patrick Monnerat brought this change] +- wincng: fix return value in _libssh2_dh_secret + + Do not ignore return value of modular exponentiation. 
- Feature an optional crypto-specific macro to rsa sign a data fragment vector +- appveyor: build and run tests for WinCNG crypto backend + +GitHub (1 Jun 2020) +- [suryakalpo brought this change] + + INSTALL_CMAKE.md: Update formatting (#481) - OS/400 crypto library is unable to sign a precomputed SHA1 hash: however - it does support a procedure that hashes data fragments and rsa signs. - If defined, the new macro _libssh2_rsa_sha1_signv() implements this function - and disables use of _libssh2_rsa_sha1_sign(). + File: INSTALL_CMAKE.md - The function described above requires that the struct iovec unused slacks are - cleared: for this reason, macro libssh2_prepare_iovec() has been introduced. - It should be defined as empty for crypto backends that are not sensitive - to struct iovec unused slack values. + Notes: + Although the original text would be immediately clear to seasoned users of CMAKE and/or Unix shell, the lack of newlines may cause some confusion for newcomers. Hence, wrapping the texts in a md code-block such that the newlines appear as intended. + + credit: + suryakalpo -- [Patrick Monnerat brought this change] +Marc Hoersken (31 May 2020) +- src: add new and align include guards in header files (#480) + + Make sure all include guards exist and follow the same format. - Fold long lines in include files +- wincng: fix multiple definition of `_libssh2_wincng' (#479) + + Add missing include guard and move global state + from header to source file by using extern. -- [Viktor Szakats brought this change] +GitHub (28 May 2020) +- [Will Cosgrove brought this change] - kex.c: fix indentation + transport.c: moving total_num check from #476 (#478) - Closes #71 + file: transport.c + + notes: + moving total_num zero length check from #476 up to the prior bounds check which already includes a total_num check. Makes it slightly more readable. 
+ + credit: + Will Cosgrove -- [Viktor Szakats brought this change] +- [lutianxiong brought this change] - add OpenSSL-1.1.0-pre2 compatibility + transport.c: fix use-of-uninitialized-value (#476) - Closes #70 + file:transport.c + + notes: + return error if malloc(0) + + credit: + lutianxiong -- [Viktor Szakats brought this change] +- [Dr. Koutheir Attouchi brought this change] - add OpenSSL 1.1.0-pre1 compatibility + libssh2_sftp.h: Changed type of LIBSSH2_FX_* constants to unsigned long, fixes #474 - * close https://github.com/libssh2/libssh2/issues/69 - * sync a declaration with the rest of similar ones - * handle EVP_MD_CTX_new() returning NULL with OpenSSL 1.1.0 - * fix potential memory leak with OpenSSL 1.1.0 in - _libssh2_*_init() functions, when EVP_MD_CTX_new() succeeds, - but EVP_DigestInit() fails. - -Marc Hoersken (22 Dec 2015) -- wincng.c: fixed _libssh2_wincng_hash_final return value + File: + libssh2_sftp.h - _libssh2_wincng_hash_final was returning the internal BCRYPT - status code instead of a valid libssh2 return value (0 or -1). + Notes: + Error constants `LIBSSH2_FX_*` are only returned by `libssh2_sftp_last_error()` which returns `unsigned long`. + Therefore these constants should be defined as unsigned long literals, instead of int literals. - This also means that _libssh2_wincng_hash never returned 0. + Credit: + Dr. Koutheir Attouchi -- wincng.c: fixed possible memory leak in _libssh2_wincng_hash +- [monnerat brought this change] + + os400qc3.c: constify libssh2_os400qc3_hash_update() data parameter. (#469) - If _libssh2_wincng_hash_update failed _libssh2_wincng_hash_final - would never have been called before. + Files: os400qc3.c, os400qc3.h - Reported by Zenju. + Notes: + Fixes building on OS400. 
#426 + + Credit: + Reported-by: hjindra on github, dev by Monnerat -Kamil Dudka (15 Dec 2015) -- [Paul Howarth brought this change] +- [monnerat brought this change] - libssh2.pc.in: fix the output of pkg-config --libs + HACKING.CRYPTO: keep up to date with new crypto definitions from code. (#466) - ... such that it does not include LDFLAGS used to build libssh2 itself. - There was a similar fix in the curl project long time ago: + File: HACKING.CRYPTO - https://github.com/bagder/curl/commit/curl-7_19_7-56-g4c8adc8 + Notes: + This commit updates the HACKING.CRYPTO documentation file in an attempt to make it in sync with current code. + New documented features are: - Bug: https://bugzilla.redhat.com/1279966 - Signed-off-by: Kamil Dudka + SHA384 + SHA512 + ECDSA + ED25519 + + Credit: + monnerat -Marc Hoersken (6 Dec 2015) -- hostkey.c: align code path of ssh_rsa_init to ssh_dss_init +- [Harry Sintonen brought this change] -- hostkey.c: fix invalid memory access if libssh2_dsa_new fails + kex.c: Add diffie-hellman-group14-sha256 Key Exchange Method (#464) - Reported by dimmaq, fixes #66 + File: kex.c + + Notes: Added diffie-hellman-group14-sha256 kex + + Credit: Harry Sintonen -Daniel Stenberg (3 Nov 2015) - [Will Cosgrove brought this change] - gcrypt: define libssh2_sha256_ctx - - Looks like it didn't make it into the latest commit for whatever reason. + os400qc3.h: define sha512 macros (#465) - Closes #58 + file: os400qc3.h + notes: fixes for building libssh2 1.9.x -- [Salvador Fandino brought this change] +- [Will Cosgrove brought this change] - libssh2_session_set_last_error: Add function + os400qc3.h: define EC types to fix building #426 (#462) - Net::SSH2, the Perl wrapping module for libssh2 implements several features* - on top of libssh2 that can fail and so need some mechanism to report the error - condition to the user. 
- - Until now, besides the error state maintained internally by libssh2, another - error state was maintained at the Perl level for every session object and then - additional logic was used to merge both error states. That is a maintenance - nighmare, and actually there is no way to do it correctly and consistently. + File: os400qc3.h + Notes: define missing EC types which prevents building + Credit: hjindra + +- [Brendan Shanks brought this change] + + hostkey.c: Fix 'unsigned int'/'uint32_t' mismatch (#461) - In order to allow the high level language to add new features to the library - but still rely in its error reporting features the new function - libssh2_session_set_last_error (that just exposses _libssh2_error_flags) is - introduced. + File: hostkey.c - *) For instance, connecting to a remote SSH service giving the hostname and - port. + Notes: + These types are the same size so most compilers are fine with it, but CodeWarrior (on classic MacOS) throws an ‘illegal implicit conversion’ error - Signed-off-by: Salvador Fandino - Signed-off-by: Salvador Fandiño + Credit: Brendan Shanks -- [Salvador Fandino brought this change] +- [Thomas Klausner brought this change] - _libssh2_error: Support allocating the error message + Makefile.am: Fix unportable test(1) operator. (#459) - Before this patch "_libssh2_error" required the error message to be a - static string. - - This patch adds a new function "_libssh2_error_flags" accepting an - additional "flags" argument and specifically the flag - "LIBSSH2_ERR_FLAG_DUP" indicating that the passed string must be - duplicated into the heap. + file: Makefile.am - Then, the method "_libssh2_error" has been rewritten to use that new - function under the hood. + Notes: + The POSIX comparison operator for test(1) is =; bash supports == but not even test from GNU coreutils does. 
- Signed-off-by: Salvador Fandino - Signed-off-by: Salvador Fandiño + Credit: + Thomas Klausner -- [Will Cosgrove brought this change] +- [Tseng Jun brought this change] - added engine.h include to fix warning + openssl.c: minor changes of coding style (#454) + + File: openssl.c + + Notes: + minor changes of coding style and align preprocessor conditional for #439 + + Credit: + Tseng Jun -- [sune brought this change] +- [Hans Meier brought this change] - kex.c: removed dupe entry from libssh2_kex_methods[] + openssl.c: Fix for use of uninitialized aes_ctr_cipher.key_len (#453) - Closes #51 + File: + Openssl.c + + Notes: + * Fix for use of uninitialized aes_ctr_cipher.key_len when using HAVE_OPAQUE_STRUCTS, regression from #439 + + Credit: + Hans Meirer, Tseng Jun -- [Salvador Fandiño brought this change] +- [Zenju brought this change] - userauth: Fix off by one error when reading public key file + agent.c: Fix Unicode builds on Windows (#417) - After reading the public key from file the size was incorrectly - decremented by one. + File: agent.c - This was usually a harmless error as the last character on the public - key file is an unimportant EOL. But if due to some error the public key - file is empty, the public key size becomes (uint)(0 - 1), resulting in - an unrecoverable out of memory error later. + Notes: + Fixes unicode builds for Windows in Visual Studio 16.3.2. - Signed-off-by: Salvador Fandi??o + Credit: + Zenju -- [Salvador Fandino brought this change] +- [Hans Meier brought this change] - channel: Detect bad usage of libssh2_channel_process_startup + openssl.c: Fix use-after-free crash in openssl backend without memory leak (#439) - A common novice programmer error (at least among those using the - wrapping Perl module Net::SSH2), is to try to reuse channels. + Files: openssl.c - This patchs detects that incorrect usage and fails with a - LIBSSH2_ERROR_BAD_USE error instead of hanging. 
+ Notes: + Fixes memory leaks and use after free AES EVP_CIPHER contexts when using OpenSSL 1.0.x. - Signed-off-by: Salvador Fandino + Credit: + Hans Meier -- [Will Cosgrove brought this change] +- [Romain Geissler @ Amadeus brought this change] - kex: Added diffie-hellman-group-exchange-sha256 support + Session.c: Fix undefined warning when mixing with LTO-enabled libcurl. (#449) - ... and fixed HMAC_Init depricated usage + File: Session.c - Closes #48 + Notes: + With gcc 9, libssh2, libcurl and LTO enabled for all binaries I see this + warning (error with -Werror): + + vssh/libssh2.c: In function ‘ssh_statemach_act’: + /data/mwrep/rgeissler/ospack/ssh2/BUILD/libssh2-libssh2-03c7c4a/src/session.c:579:9: error: ‘seconds_to_next’ is used uninitialized in this function [-Werror=uninitialized] + 579 | int seconds_to_next; + | ^ + lto1: all warnings being treated as errors + + Gcc normally issues -Wuninitialized when it is sure there is a problem, + and -Wmaybe-uninitialized when it's not sure, but it's possible. Here + the compiler seems to have find a real case where this could happen. I + looked in your code and overall it seems you always check if the return + code is non null, not often that it's below zero. I think we should do + the same here. With this patch, gcc is fine. + + Credit: + Romain-Geissler-1A -Alexander Lamaison (21 Sep 2015) -- Prefixed new #defines to prevent collisions. +- [Zenju brought this change] + + transport.c: Fix crash with delayed compression (#443) - Other libraries might have their own USE_WIN32_*FILES. + Files: transport.c + + Notes: + Fixes crash with delayed compression option using Bitvise server. 
+ + Contributor: + Zenju -- [keith-daigle brought this change] +- [Will Cosgrove brought this change] - Update examples/scp.c to fix bug where large files on win32 would cause got to wrap and go negative + Update INSTALL_MAKE path to INSTALL_MAKE.md (#446) + + Included for #429 -- [David Byron brought this change] +- [Will Cosgrove brought this change] - add libssh2_scp_recv2 to support large (> 2GB) files on windows + Update INSTALL_CMAKE filename to INSTALL_CMAKE.md (#445) + + Fixing for #429 -Daniel Stenberg (17 Sep 2015) -- [sune brought this change] +- [Wallace Souza brought this change] - WinCNG: support for SHA256/512 HMAC + Rename INSTALL_CMAKE to INTALL_CMAKE.md (#429) - Closes #47 + Adding Markdown file extension in order to Github render the instructions properly -- [brian m. carlson brought this change] +Will Cosgrove (17 Dec 2019) +- [Daniel Stenberg brought this change] - Add support for HMAC-SHA-256 and HMAC-SHA-512. + include/libssh2.h: fix comment: the known host key uses 4 bits (#438) + +- [Zenju brought this change] + + ssh-ed25519: Support PKIX + calc pubkey from private (#416) - Implement support for these algorithms and wire them up to the libgcrypt - and OpenSSL backends. Increase the maximum MAC buffer size to 64 bytes - to prevent buffer overflows. Prefer HMAC-SHA-256 over HMAC-SHA-512, and - that over HMAC-SHA-1, as OpenSSH does. 
+ Files: openssl.c/h + Author: Zenju + Notes: + Adds support for PKIX key reading by fixing: - Closes #40 + _libssh2_pub_priv_keyfile() is missing the code to extract the ed25519 public key from a given private key + + _libssh2_ed25519_new_private_frommemory is only parsing the openssh key format but does not understand PKIX (as retrieved via PEM_read_bio_PrivateKey) -- [Zenju brought this change] +GitHub (15 Oct 2019) +- [Will Cosgrove brought this change] - kex: free server host key before allocating it (again) + .travis.yml: Fix Chrome and 32 bit builds (#423) - Fixes a memory leak when Synology server requests key exchange + File: .travis.yml - Closes #43 + Notes: + * Fix Chrome installing by using Travis build in directive + * Update to use libgcrypt20-dev package to fix 32 bit builds based on comments found here: + https://launchpad.net/ubuntu/xenial/i386/libgcrypt11-dev -- [Viktor Szakats brought this change] +- [Will Cosgrove brought this change] - GNUmakefile: up OpenSSL version + packet.c: improved parsing in packet_x11_open (#410) - closes #23 + Use new API to parse data in packet_x11_open() for better bounds checking. -- [Viktor Szakats brought this change] +Will Cosgrove (12 Sep 2019) +- [Michael Buckley brought this change] - GNUmakefile: add -m64 CFLAGS when targeting mingw64, add -m32/-m64 to LDFLAGS + knownhost.c: Double the static buffer size when reading and writing known hosts (#409) - libssh2 equivalent of curl patch https://github.com/bagder/curl/commit/d21b66835f2af781a3c2a685abc92ef9f0cd86be + Notes: + We had a user who was being repeatedly prompted to accept a server key repeatedly. It turns out the base64-encoded key was larger than the static buffers allocated to read and write known hosts. I doubled the size of these buffers. - This allows to build for the non-default target when using a multi-target mingw distro. - Also bump default OpenSSL dependency path to 1.0.2c. 
+ Credit: + Michael Buckley -- [Viktor Szakats brought this change] +GitHub (4 Sep 2019) +- [Will Cosgrove brought this change] - GNUmakefile: add support for LIBSSH2_LDFLAG_EXTRAS + packet.c: improved packet parsing in packet_queue_listener (#404) - It is similar to existing LIBSSH2_CFLAG_EXTRAS, but for - extra linker options. + * improved bounds checking in packet_queue_listener - Also delete some line/file ending whitespace. + file: packet.c - closes #27 + notes: + improved parsing packet in packet_queue_listener -- [nasacj brought this change] +- [Will Cosgrove brought this change] - hostkey.c: Fix compiling error when OPENSSL_NO_MD5 is defined + packet.c: improve message parsing (#402) - Closes #32 + * packet.c: improve parsing of packets + + file: packet.c + + notes: + Use _libssh2_get_string API in SSH_MSG_DEBUG/SSH_MSG_DISCONNECT. Additional uint32 bounds check in SSH_MSG_GLOBAL_REQUEST. -- [Mizunashi Mana brought this change] +- [Will Cosgrove brought this change] - openssl.h: adjust the rsa/dsa includes + misc.c: _libssh2_ntohu32 cast bit shifting (#401) - ... to work when built without DSA support. + To quite overly aggressive analyzers. - Closes #36 + Note, the builds pass, Travis is having some issues with Docker images. -Alexander Lamaison (26 Jul 2015) -- Let CMake build work as a subproject. - - Patch contributed by JasonHaslam. +- [Will Cosgrove brought this change] -- Fix builds with Visual Studio 2015. + kex.c: improve bounds checking in kex_agree_methods() (#399) - VS2015 moved stdio functions to the header files as inline function. That means check_function_exists can't detect them because it doesn't use header files - just does a link check. Instead we need to use check_symbol_exists with the correct headers. 
+ file: kex.c + + notes: + use _libssh2_get_string instead of kex_string_pair which does additional checks -Kamil Dudka (2 Jul 2015) -- cmake: include CMake files in the release tarballs +Will Cosgrove (23 Aug 2019) +- [Fabrice Fontaine brought this change] + + acinclude.m4: add mbedtls to LIBS (#371) - Despite we announced the CMake support in libssh2-1.6.0 release notes, - the files required by the CMake build system were not included in the - release tarballs. Hence, the only way to use CMake for build was the - upstream git repository. + Notes: + This is useful for static builds so that the Libs.private field in + libssh2.pc contains correct info for the benefit of pkg-config users. + Static link with libssh2 requires this information. - This commit makes CMake actually supported in the release tarballs. - -- tests/mansyntax.sh: fix 'make distcheck' with recent autotools + Signed-off-by: Baruch Siach + [Retrieved from: + https://git.buildroot.net/buildroot/tree/package/libssh2/0002-acinclude.m4-add-mbedtls-to-LIBS.patch] + Signed-off-by: Fabrice Fontaine - Do not create symbolic links off the build directory. Recent autotools - verify that out-of-source build works even if the source directory tree - is not writable. + Credit: + Fabrice Fontaine -- openssl: fix memleak in _libssh2_dsa_sha1_verify() +- [jethrogb brought this change] -Daniel Stenberg (12 Jun 2015) -- openssl: make libssh2_sha1 return error code + Generate debug info when building with MSVC (#178) - - use the internal prefix _libssh2_ for non-exported functions + files: CMakeLists.txt - - removed libssh2_md5() since it wasn't used + notes: Generate debug info when building with MSVC - Reported-by: Kamil Dudka + credit: + jethrogb -- [LarsNordin-LNdata brought this change] +- [Panos brought this change] - SFTP: Increase speed and datasize in SFTP read + Add agent forwarding implementation (#219) - The function sftp_read never return more then 2000 bytes (as it should - when I asked Daniel). 
I increased the MAX_SFTP_READ_SIZE to 30000 but - didn't get the same speed as a sftp read in SecureSSH. I analyzed the - code and found that a return always was dona when a chunk has been read. - I changed it to a sliding buffer and worked on all available chunks. I - got an increase in speed and non of the test I have done has failed - (both local net and over Internet). Please review and test. I think - 30000 is still not the optimal MAX_SFTP_READ_SIZE, my next goal is to - make an API to enable changing this value (The SecureSSH sftp_read has - more complete filled packages when comparing the network traffic) + files: channel.c, test_agent_forward_succeeds.c, libssh2_priv.h, libssh2.h, ssh2_agent_forwarding.c + + notes: + * Adding SSH agent forwarding. + * Fix agent forwarding message, updated example. + Added integration test code and cmake target. Added example to cmake list. + + credit: + pkittenis -- bump: start working on 1.6.1 +GitHub (2 Aug 2019) +- [Will Cosgrove brought this change] -Version 1.6.0 (5 Jun 2015) + Update EditorConfig + + Added max_line_length = 80 -Daniel Stenberg (5 Jun 2015) -- RELEASE-NOTES: synced with 858930cae5c6a +- [Will Cosgrove brought this change] -Marc Hoersken (19 May 2015) -- wincng.c: fixed indentation + global.c : fixed call to libssh2_crypto_exit #394 (#396) + + * global.c : fixed call to libssh2_crypto_exit #394 + + File: global.c + + Notes: Don't call `libssh2_crypto_exit()` until `_libssh2_initialized` count is down to zero. 
+ + Credit: seba30 + +Will Cosgrove (30 Jul 2019) +- [hlefebvre brought this change] + + misc.c : Add an EWOULDBLOCK check for better portability (#172) + + File: misc.c + + Notes: Added support for all OS' that implement EWOULDBLOCK, not only VMS + + Credit: hlefebvre + +- [Etienne Samson brought this change] + + userauth.c: fix off by one error when loading public keys with no id (#386) + + File: userauth.c + + Credit: + Etienne Samson + + Notes: + Caught by ASAN: + + ================================================================= + ==73797==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x60700001bcf0 at pc 0x00010026198d bp 0x7ffeefbfed30 sp 0x7ffeefbfe4d8 + READ of size 69 at 0x60700001bcf0 thread T0 + 2019-07-04 08:35:30.292502+0200 atos[73890:2639175] examining /Users/USER/*/libssh2_clar [73797] + #0 0x10026198c in wrap_memchr (libclang_rt.asan_osx_dynamic.dylib:x86_64h+0x1f98c) + #1 0x1000f8e66 in file_read_publickey userauth.c:633 + #2 0x1000f2dc9 in userauth_publickey_fromfile userauth.c:1513 + #3 0x1000f2948 in libssh2_userauth_publickey_fromfile_ex userauth.c:1590 + #4 0x10000e254 in test_userauth_publickey__ed25519_auth_ok publickey.c:69 + #5 0x1000090c3 in clar_run_test clar.c:260 + #6 0x1000038f3 in clar_run_suite clar.c:343 + #7 0x100003272 in clar_test_run clar.c:522 + #8 0x10000c3cc in main runner.c:60 + #9 0x7fff5b43b3d4 in start (libdyld.dylib:x86_64+0x163d4) + + 0x60700001bcf0 is located 0 bytes to the right of 80-byte region [0x60700001bca0,0x60700001bcf0) + allocated by thread T0 here: + #0 0x10029e053 in wrap_malloc (libclang_rt.asan_osx_dynamic.dylib:x86_64h+0x5c053) + #1 0x1000b4978 in libssh2_default_alloc session.c:67 + #2 0x1000f8aba in file_read_publickey userauth.c:597 + #3 0x1000f2dc9 in userauth_publickey_fromfile userauth.c:1513 + #4 0x1000f2948 in libssh2_userauth_publickey_fromfile_ex userauth.c:1590 + #5 0x10000e254 in test_userauth_publickey__ed25519_auth_ok publickey.c:69 + #6 0x1000090c3 in clar_run_test 
clar.c:260 + #7 0x1000038f3 in clar_run_suite clar.c:343 + #8 0x100003272 in clar_test_run clar.c:522 + #9 0x10000c3cc in main runner.c:60 + #10 0x7fff5b43b3d4 in start (libdyld.dylib:x86_64+0x163d4) + + SUMMARY: AddressSanitizer: heap-buffer-overflow (libclang_rt.asan_osx_dynamic.dylib:x86_64h+0x1f98c) in wrap_memchr + Shadow bytes around the buggy address: + 0x1c0e00003740: fd fd fd fd fd fd fd fd fd fd fa fa fa fa fd fd + 0x1c0e00003750: fd fd fd fd fd fd fd fa fa fa fa fa 00 00 00 00 + 0x1c0e00003760: 00 00 00 00 00 00 fa fa fa fa 00 00 00 00 00 00 + 0x1c0e00003770: 00 00 00 fa fa fa fa fa fd fd fd fd fd fd fd fd + 0x1c0e00003780: fd fd fa fa fa fa fd fd fd fd fd fd fd fd fd fa + =>0x1c0e00003790: fa fa fa fa 00 00 00 00 00 00 00 00 00 00[fa]fa + 0x1c0e000037a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + 0x1c0e000037b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + 0x1c0e000037c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + 0x1c0e000037d0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + 0x1c0e000037e0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa + Shadow byte legend (one shadow byte represents 8 application bytes): + Addressable: 00 + Partially addressable: 01 02 03 04 05 06 07 + Heap left redzone: fa + Freed heap region: fd + Stack left redzone: f1 + Stack mid redzone: f2 + Stack right redzone: f3 + Stack after return: f5 + Stack use after scope: f8 + Global redzone: f9 + Global init order: f6 + Poisoned by user: f7 + Container overflow: fc + Array cookie: ac + Intra object redzone: bb + ASan internal: fe + Left alloca redzone: ca + Right alloca redzone: cb + Shadow gap: cc -- [sbredahl brought this change] +- [Thilo Schulz brought this change] - wincng.c: fixed memleak in (block) cipher destructor + openssl.c : Fix use-after-free crash on reinitialization of openssl backend + + file : openssl.c + + notes : + libssh2's openssl backend has a use-after-free condition if HAVE_OPAQUE_STRUCTS is defined and you call libssh2_init() again after 
prior initialisation/deinitialisation of libssh2 + + credit : Thilo Schulz + +- [axjowa brought this change] + + openssl.h : Use of ifdef where if should be used (#389) + + File : openssl.h + + Notes : + LIBSSH2_ECDSA and LIBSSH2_ED25519 are always defined so the #ifdef + checks would never be false. + + This change makes it possible to build libssh2 against OpenSSL built + without EC support. + + Change-Id: I0a2f07c2d80178314dcb7d505d1295d19cf15afd + + Credit : axjowa -Alexander Lamaison (6 May 2015) -- [Jakob Egger brought this change] +- [Zenju brought this change] - libssh2_channel_open: more detailed error message + Agent.c : Preserve error info from agent_list_identities() (#374) - The error message returned by libssh2_channel_open in case of a server side channel open failure is now more detailed and includes the four standard error conditions in RFC 4254. + Files : agent.c + + Notes : + Currently the error details as returned by agent_transact_pageant() are overwritten by a generic "agent list id failed" message by int agent_list_identities(LIBSSH2_AGENT* agent). + + Credit : + Zenju -- [Hannes Domani brought this change] +- [Who? Me?! brought this change] - kex: fix libgcrypt memory leaks of bignum + Channel.c: Make sure the error code is set in _libssh2_channel_open() (#381) - Fixes #168. + File : Channel.c + + Notes : + if _libssh2_channel_open() fails, set the error code. + + Credit : + mark-i-m -Marc Hoersken (3 Apr 2015) -- configure.ac: check for SecureZeroMemory for clear memory feature +- [Orgad Shaneh brought this change] -- Revert "wincng.c: fix clear memory feature compilation with mingw" + Kex.c, Remove unneeded call to strlen (#373) - This reverts commit 2d2744efdd0497b72b3e1ff6e732aa4c0037fc43. + File : Kex.c - Autobuilds show that this did not solve the issue. - And it seems like RtlFillMemory is defined to memset, - which would be optimized out by some compilers. 
- -- wincng.c: fix clear memory feature compilation with mingw + Notes : + Removed call to strlen + + Credit : + Orgad Shaneh -Alexander Lamaison (1 Apr 2015) -- [LarsNordin-LNdata brought this change] +- [Pedro Monreal brought this change] - Enable use of OpenSSL that doesn't have DSA. + Spelling corrections (#380) - Added #if LIBSSH2_DSA for all DSA functions. + Files : + libssh2.h, libssh2_sftp.h, bcrypt_pbkdf.c, mbedtls.c, sftp.c, ssh2.c + + Notes : + * Fixed misspellings + + Credit : + Pedro Monreal -- [LarsNordin-LNdata brought this change] +- [Sebastián Katzer brought this change] - Use correct no-blowfish #define with OpenSSL. + Fix Potential typecast error for `_libssh2_ecdsa_key_get_curve_type` (#383) - The OpenSSL define is OPENSSL_NO_BF, not OPENSSL_NO_BLOWFISH. - -Marc Hoersken (25 Mar 2015) -- configure: error if explicitly enabled clear-memory is not supported + Issue : #383 - This takes 22bd8d81d8fab956085e2079bf8c29872455ce59 and - b8289b625e291bbb785ed4add31f4759241067f3 into account, - but still makes it enabled by default if it is supported - and error out in case it is unsupported and was requested. - -Daniel Stenberg (25 Mar 2015) -- configure: make clear-memory default but only WARN if backend unsupported + Files : hostkey.c, crypto.h, openssl.c - ... instead of previous ERROR. 
+ Notes : + * Fix potential typecast error for `_libssh2_ecdsa_key_get_curve_type` + * Rename _libssh2_ecdsa_key_get_curve_type to _libssh2_ecdsa_get_curve_type + + Credit : + Sebastián Katzer -Marc Hoersken (24 Mar 2015) -- wincng.h: fix warning about computed return value not being used +GitHub (20 Jun 2019) +- [Will Cosgrove brought this change] -- nonblocking examples: fix warning about unused tvdiff on Mac OS X + bump copyright date -Daniel Stenberg (24 Mar 2015) -- openssl: fix compiler warnings +Version 1.9.0 (19 Jun 2019) -- cofigure: fix --disable-clear-memory check +GitHub (19 Jun 2019) +- [Will Cosgrove brought this change] -Marc Hoersken (23 Mar 2015) -- scp.c: improved command length calculation - - Reduced number of calls to strlen, because shell_quotearg already - returns the length of the resulting string (e.q. quoted path) - which we can add to the existing and known cmd_len. - Removed obsolete call to memset again, because we can put a final - NULL-byte at the end of the string using the calculated length. + 1.9 Formatting -- scp.c: improved and streamlined formatting +- [Will Cosgrove brought this change] -- scp.c: fix that scp_recv may transmit not initialised memory + 1.9 Release notes -- scp.c: fix that scp_send may transmit not initialised memory - - Fixes ticket 244. Thanks Torsten. +Will Cosgrove (17 May 2019) +- [Alexander Curtiss brought this change] -- kex: do not ignore failure of libssh2_sha1_init() + libgcrypt.c : Fixed _libssh2_rsa_sha1_sign memory leak. (#370) - Based upon 43b730ce56f010e9d33573fcb020df49798c1ed8. - Fixes ticket 290. Thanks for the suggestion, mstrsn. 
- -- wincng.h: fix return code of libssh2_md5_init() + File: libgcrypt.c + + Notes : Added calls to gcry_sexp_release to free memory allocated by gcry_sexp_find_token + + Credit : + Reporter : beckmi + PR by: Alexander Curtiss -- openssl.c: fix possible segfault in case EVP_DigestInit fails +- [Orivej Desh brought this change] -- wincng.c: fix possible use of uninitialized variables + libssh2_priv.h : Fix musl build warning on sys/poll.h (#346) + + File : libssh2_priv.h + + Notes : + musl prints `redirecting incorrect #include to ` + http://git.musl-libc.org/cgit/musl/commit/include/sys/poll.h?id=54446d730cfb17c5f7bcf57f139458678f5066cc + + poll is defined by POSIX to be in poll.h: + http://pubs.opengroup.org/onlinepubs/7908799/xsh/poll.html + + Credit : Orivej Desh -- wincng.c: fix unused argument warning if clear memory is not enabled +GitHub (1 May 2019) +- [Will Cosgrove brought this change] -- wincng: Added explicit clear memory feature to WinCNG backend + kex.c : additional bounds checks in diffie_hellman_sha1/256 (#361) - This re-introduces the original feature proposed during - the development of the WinCNG crypto backend. It still needs - to be added to libssh2 itself and probably other backends. + Files : kex.c, misc.c, misc.h - Memory is cleared using the function SecureZeroMemory which is - available on Windows systems, just like the WinCNG backend. - -- wincng.c: fixed mixed line-endings + Notes : + Fixed possible out of bounds memory access when reading malformed data in diffie_hellman_sha1() and diffie_hellman_sha256(). + + Added _libssh2_copy_string() to misc.c to return an allocated and filled char buffer from a string_buf offset. Removed no longer needed s var in kmdhgGPshakex_state_t. 
-- wincng.c: fixed use of invalid parameter types in a8d14c5dcf +Will Cosgrove (26 Apr 2019) +- [Tseng Jun brought this change] -- wincng.c: only try to load keys corresponding to the algorithm + sftp.c : sftp_bin2attr() Correct attrs->gid assignment (#366) + + Regression with fix for #339 + + Credit : Tseng Jun -- wincng.c: moved PEM headers into definitions +- [Tseng Jun brought this change] -- wincng.h: fixed invalid parameter name + kex.c : Correct type cast in curve25519_sha256() (#365) -- wincng: fixed mismatch with declarations in crypto.h +GitHub (24 Apr 2019) +- [Will Cosgrove brought this change] -- userauth.c: fixed warning C6001: using uninitialized sig and sig_len + transport.c : scope local total_num var (#364) + + file : transport.c + notes : move local `total_num` variable inside of if block to prevent scope access issues which caused #360. -- pem.c: fixed warning C6269: possible incorrect order of operations +Will Cosgrove (24 Apr 2019) +- [doublex brought this change] -- wincng: add support for authentication keys to be passed in memory + transport.c : fixes bounds check if partial packet is read - Based upon 18cfec8336e and daa2dfa2db. - -- pem.c: add _libssh2_pem_parse_memory to parse PEM from memory + Files : transport.c - Requirement to implement 18cfec8336e for Libgcrypt and WinCNG. - -- pem.c: fix copy and paste mistake from 55d030089b8 - -- userauth.c: fix another possible dereference of a null pointer + Issue : #360 + + Notes : + 'p->total_num' instead of local value total_num when doing bounds check. + + Credit : Doublex -- userauth.c: fix possible dereference of a null pointer +GitHub (23 Apr 2019) +- [Will Cosgrove brought this change] -- pem.c: reduce number of calls to strlen in readline + Editor config file for source files (#322) + + Simple start to an editor config file when editing source files to make sure they are configured correctly. 
-Alexander Lamaison (17 Mar 2015) - [Will Cosgrove brought this change] - Initialise HMAC_CTX in more places. + misc.c : String buffer API improvements (#332) - Missed a couple more places we init ctx to avoid openssl threading crash. - -- Build build breakage in WinCNG backend caused when adding libssh2_userauth_publickey_frommemory. + Files : misc.c, hostkey.c, kex.c, misc.h, openssl.c, sftp.c - The new feature isn't implemented for the WinCNG backend currently, but the WinCNG backend didn't contain any implementation of the required backend functions - even ones that returns an error. That caused link errors. + Notes : + * updated _libssh2_get_bignum_bytes and _libssh2_get_string. Now pass in length as an argument instead of returning it to keep signedness correct. Now returns -1 for failure, 0 for success. - This change fixes the problem by providing an implementation of the backend functions that returns an error. + _libssh2_check_length now returns 0 on success and -1 on failure to match the other string_buf functions. Added comment to _libssh2_check_length. + + Credit : Will Cosgrove -- Fix breakage in WinCNG backend caused by introducing libssh2_hmac_ctx_init. +Will Cosgrove (19 Apr 2019) +- [doublex brought this change] + + mbedtls.c : _libssh2_mbedtls_rsa_new_private_frommemory() allow private-key from memory (#359) - The macro was defined to nothing for the libgcrypt backend, but not for WinCNG. This brings the latter into line with the former. + File : mbedtls.c + + Notes: _libssh2_mbedtls_rsa_new_private_frommemory() fixes private-key from memory reading to by adding NULL terminator before parsing; adds passphrase support. + + Credit: doublex -Daniel Stenberg (15 Mar 2015) -- userauth_publickey_frommemory.3: add AVAILABILITY +- [Ryan Kelley brought this change] + + Session.c : banner_receive() from leaking when accessing non ssh ports (#356) - ... 
it will be added in 1.6.0 + File : session.c + + Release previous banner in banner_receive() if the session is reused after a failed connection. + + Credit : Ryan Kelley -- libssh2: next version will be called 1.6.0 +GitHub (11 Apr 2019) +- [Will Cosgrove brought this change] + + Formatting in agent.c - ... since we just added a new function. + Removed whitespace. -- docs: add libssh2_userauth_publickey_frommemory.3 to dist +- [Will Cosgrove brought this change] + + Fixed formatting in agent.c - The function and man page were added in commit 18cfec8336e + Quiet linter around a couple if blocks and pointer. -- [Jakob Egger brought this change] +Will Cosgrove (11 Apr 2019) +- [Zhen-Huan HWANG brought this change] - direct_tcpip: Fixed channel write + sftp.c : discard and reset oversized packet in sftp_packet_read() (#269) - There were 3 bugs in this loop: - 1) Started from beginning after partial writes - 2) Aborted when 0 bytes were sent - 3) Ignored LIBSSH2_ERROR_EAGAIN + file : sftp.c - See also: - https://trac.libssh2.org/ticket/281 - https://trac.libssh2.org/ticket/293 + notes : when sftp_packet_read() encounters an sftp packet which exceeds SFTP max packet size it now resets the reading state so it can continue reading. + + credit : Zhen-Huan HWANG -Alexander Lamaison (15 Mar 2015) +GitHub (11 Apr 2019) - [Will Cosgrove brought this change] - Must init HMAC_CTX before using it. + Add agent functions libssh2_agent_get_identity_path() and libssh2_agent_set_identity_path() (#308) - Must init ctx before using it or openssl will reuse the hmac which is not thread safe and causes a crash. - Added libssh2_hmac_ctx_init macro. - -- Add continuous integration configurations. + File : agent.c - Linux-based CI is done by Travis CI. Windows-based CI is done by Appveyor. + Notes : + Libssh2 uses the SSH_AUTH_SOCK env variable to read the system agent location. However, when using a custom agent path you have to set this value using setenv which is not thread-safe. 
The new functions allow for a way to set a custom agent socket path in a thread safe manor. -- [David Calavera brought this change] +- [Will Cosgrove brought this change] - Allow authentication keys to be passed in memory. + Simplified _libssh2_check_length (#350) - All credits go to Joe Turpin, I'm just reaplying and cleaning his patch: - http://www.libssh2.org/mail/libssh2-devel-archive-2012-01/0015.shtml + * Simplified _libssh2_check_length - * Use an unimplemented error for extracting keys from memory with libgcrypt. + misc.c : _libssh2_check_length() + + Removed cast and improved bounds checking and format. + + Credit : Yuriy M. Kaminskiy -Daniel Stenberg (14 Mar 2015) -- docs: include the renamed INSTALL* files in dist +- [Will Cosgrove brought this change] -Alexander Lamaison (13 Mar 2015) -- Prevent collisions between CMake and Autotools in examples/ and tests/. + _libssh2_check_length() : additional bounds check (#348) + + Misc.c : _libssh2_check_length() + + Ensure the requested length is less than the total length before doing the additional bounds check -- Avoid clash between CMake build and Autotools. +Daniel Stenberg (25 Mar 2019) +- misc: remove 'offset' from string_buf - Autotools expects a configuration template file at src/libssh2_config.h.in, which buildconf generates. But the CMake build system has its CMake-specific version of the file at this path. This means that, if you don't run buildconf, the Autotools build will fail because it configured the wrong header template. + It isn't necessary. - See https://github.com/libssh2/libssh2/pull/8. + Closes #343 -- Merge pull request #8 from alamaison/cmake +- sftp: repair mtime from e1ead35e475 - CMake build system. + A regression from e1ead35e4759 broke the SFTP mtime logic in + sftp_bin2attr + + Also simplified the _libssh2_get_u32/u64 functions slightly. + + Closes #342 -- CMake build system. 
+- session_disconnect: don't zero state, just clear the right bit - Tested: - - Windows: - - Visual C++ 2005/2008/2010/2012/2013/MinGW-w64 - - static/shared - - 32/64-bit - - OpenSSL/WinCNG - - Without zlib - - Linux: - - GCC 4.6.3/Clang 3.4 - - static/shared - - 32/64-bit - - OpenSSL/Libgcrypt - - With/Without zlib - - MacOS X - - AppleClang 6.0.0 - - static - - 64-bit - - OpenSSL - - Without zlib + If we clear the entire field, the freeing of data in session_free() is + skipped. Instead just clear the bit that risk making the code get stuck + in the transport functions. - Conflicts: - README + Regression from 4d66f6762ca3fc45d9. + + Reported-by: dimmaq on github + Fixes #338 + Closes #340 -- Man man syntax tests fail gracefully if man version is not suitable. +- libssh2_sftp.h: restore broken ABI + + Commit 41fbd44 changed variable sizes/types in a public struct which + broke the ABI, which breaks applications! + + This reverts that change. + + Closes #339 -- Return valid code from test fixture on failure. +- style: make includes and examples code style strict - The sshd test fixture was returning -1 if an error occurred, but negative error codes aren't technically valid (google it). Bash on Windows converted them to 0 which made setup failure look as though all tests were passing. + make travis and the makefile rule verify them too + + Closes #334 -- Let mansyntax.sh work regardless of where it is called from. +GitHub (21 Mar 2019) +- [Daniel Stenberg brought this change] -Daniel Stenberg (12 Mar 2015) -- [Viktor Szakáts brought this change] + create a github issue template - mingw build: allow to pass custom CFLAGS +Daniel Stenberg (21 Mar 2019) +- stale-bot: activated - Allow to pass custom `CFLAGS` options via environment variable - `LIBSSH2_CFLAG_EXTRAS`. Default and automatically added options of - `GNUmakefile` have preference over custom ones. This addition is useful - for passing f.e. 
custom CPU tuning or LTO optimization (`-flto - -ffat-lto-objects`) options. The only current way to do this is to edit - `GNUmakefile`. This patch makes it unnecessary. + The stale bot will automatically mark stale issues (inactive for 90 + days) and if still untouched after 21 more days, close them. - This is a mirror of similar libcurl patch: - https://github.com/bagder/curl/pull/136 + See https://probot.github.io/apps/stale/ -- [Will Cosgrove brought this change] - - userauth: Fixed prompt text no longer being copied to the prompts struct +- libssh2_session_supported_algs.3: fix formatting mistakes - Regression from 031566f9c + Reported-by: Max Horn + Fixes #57 -- README: update the git repo locations +- [Zenju brought this change] -- wait_socket: wrong use of difftime() + libssh2.h: Fix Error C2371 'ssize_t': redefinition - With reversed arguments it would always return a negative value... + Closes #331 + +- travis: add code style check - Bug: https://github.com/bagder/libssh2/issues/1 + Closes #324 -- bump: start working toward 1.5.1 now +- code style: unify code style + + Indent-level: 4 + Max columns: 79 + No spaces after if/for/while + Unified brace positions + Unified white spaces -Version 1.5.0 (11 Mar 2015) +- src/checksrc.pl: code style checker + + imported as-is from curl -Daniel Stenberg (11 Mar 2015) -- RELEASE-NOTES: 1.5.0 release +Will Cosgrove (19 Mar 2019) +- Merge branch 'MichaelBuckley-michaelbuckley-security-fixes' -- [Mariusz Ziulek brought this change] +- Silence unused var warnings (#329) + + Silence warnings about unused variables in this test - kex: bail out on rubbish in the incoming packet +- Removed unneeded > 0 check - CVE-2015-1782 + When checking `userauth_kybd_num_prompts > 100` we don't care if it's also above zero. + +- [Matthew D. Fuller brought this change] + + Spell OpenSS_H_ right when talking about their specific private key (#321) - Bug: http://www.libssh2.org/adv_20150311.html + Good catch, thanks. 
-- docs: move INSTALL, AUTHORS, HACKING and TODO to docs/ +GitHub (19 Mar 2019) +- [Will Cosgrove brought this change] + + Silence unused var warnings (#329) - And with this, cleanup README to be shorter and mention the new source - code home. + Silence warnings about unused variables in this test -- .gitignore: don't ignore INSTALL +Michael Buckley (19 Mar 2019) +- Fix more scope and printf warning errors -Dan Fandrich (4 Mar 2015) -- examples/x11.c: include sys/select.h for improved portability +- Silence unused variable warning -Daniel Stenberg (4 Mar 2015) -- RELEASE-NOTES: synced with a8473c819bc068 +GitHub (19 Mar 2019) +- [Will Cosgrove brought this change] + + Removed unneeded > 0 check - In preparation for the upcoming 1.5.0 release. + When checking `userauth_kybd_num_prompts > 100` we don't care if it's also above zero. -Guenter Knauf (8 Jan 2015) -- NetWare build: added some missing exports. +Will Cosgrove (19 Mar 2019) +- [Matthew D. Fuller brought this change] -Marc Hoersken (29 Dec 2014) -- knownhost.c: fix use of uninitialized argument variable wrote + Spell OpenSS_H_ right when talking about their specific private key (#321) - Detected by clang scan in line 1195, column 18. + Good catch, thanks. -- examples/x11.c: fix result of operation is garbage or undefined - - Fix use of uninitialized structure w_size_bck. - Detected by clang scan in line 386, column 28. +Michael Buckley (18 Mar 2019) +- Fix errors identified by the build process -- examples/x11.c: remove dead assigments of some return values - - Detected by clang scan in line 212, column 9. - Detected by clang scan in line 222, column 13. - Detected by clang scan in line 410, column 13. +- Fix casting errors after merge -- examples/x11.c: fix possible memory leak if read fails - - Detected by clang scan in line 224, column 21. 
+GitHub (18 Mar 2019) +- [Michael Buckley brought this change] -- examples/x11.c: fix invalid removal of first list element + Merge branch 'master' into michaelbuckley-security-fixes + +Michael Buckley (18 Mar 2019) +- Move fallback SIZE_MAX and UINT_MAX to libssh2_priv.h + +- Fix type and logic issues with _libssh2_get_u64 + +Daniel Stenberg (17 Mar 2019) +- examples: fix various compiler warnings + +- lib: fix various compiler warnings + +- session: ignore pedantic warnings for funcpointer <=> void * + +- travis: add a build using configure - Fix use of memory after it was being freed. - Detected by clang scan in line 56, column 12. + Closes #320 -- userauth.c: make sure that sp_len is positive and avoid overflows +- configure: provide --enable-werror + +- appveyor: remove old builds that mostly cause failures - ... if the pointer subtraction of sp1 - pubkey - 1 resulted in a - negative or larger value than pubkey_len, memchr would fail. + ... and only run on master branch. - Reported by Coverity CID 89846. + Closes #323 -- channel.c: remove logically dead code, host cannot be NULL here +- cmake: add two missing man pages to get installed too - ... host cannot be NULL in line 525, because it is always - valid (e.g. at least set to "0.0.0.0") after lines 430 and 431. + Both libssh2_session_handshake.3 and + libssh2_userauth_publickey_frommemory.3 were installed by the configure + build already. - Reported by Coverity CID 89807. + Reported-by: Arfrever on github + Fixes #278 -- session.c: check return value of session_nonblock during startup +- include/libssh2.h: warning: "_WIN64" is not defined, evaluates to 0 - Reported by Coverity CID 89803. + We don't use #if for defines that might not be defined. -- session.c: check return value of session_nonblock in debug mode - - Reported by Coverity CID 89805. 
+- pem: //-comments are not allowed -- pem.c: fix mixed line-endings introduced with 8670f5da24 +Will Cosgrove (14 Mar 2019) +- [Daniel Stenberg brought this change] -- pem.c: make sure there's a trailing zero and b64data is not NULL + userauth: fix "Function call argument is an uninitialized value" (#318) - ... if there is no base64 data between PEM header and footer. - Reported by Coverity CID 89823. + Detected by scan-build. -- kex.c: make sure mlist is not set to NULL - - ... if the currently unsupported LANG methods are called. - Reported by Coverity CID 89834. +- fixed unsigned/signed issue -- packet.c: i < 256 was always true and i would overflow to 0 +Daniel Stenberg (15 Mar 2019) +- session_disconnect: clear state - Visualize that the 0-termination is intentional, because the array - is later passed to strlen within _libssh2_packet_askv. - -- silence multiple data conversion warnings - -Daniel Stenberg (23 Dec 2014) -- agent_connect_unix: make sure there's a trailing zero + If authentication is started but not completed before the application + gives up and instead wants to shut down the session, the '->state' field + might still be set and thus effectively dead-lock session_disconnect. - ... if the path name was too long. Reported by Coverity CID 89801. - -Marc Hoersken (22 Dec 2014) -- examples on Windows: use native SOCKET-type instead of int + This happens because both _libssh2_transport_send() and + _libssh2_transport_read() refuse to do anything as long as state is set + without the LIBSSH2_STATE_KEX_ACTIVE bit. - And check return values accordingly. + Reported in curl bug https://github.com/curl/curl/issues/3650 + + Closes #310 -- userauth.c: improve readability and clarity of for-loops +Will Cosgrove (14 Mar 2019) +- Release notes from 1.8.1 -Daniel Stenberg (22 Dec 2014) -- calloc: introduce LIBSSH2_CALLOC() - - A simple function using LIBSSH2_ALLOC + memset, since this pattern was - used in multiple places and this simplies code in general. 
+Michael Buckley (14 Mar 2019) +- Use string_buf in sftp_init(). -Marc Hoersken (15 Dec 2014) -- libssh2_priv.h: Ignore session, context and format parameters +- Guard against out-of-bounds reads in publickey.c -- x11 example: check return value of socket function +- Guard against out-of-bounds reads in session.c -- examples: fixed mixed line-endings introduced with aedfba25b8 +- Guard against out-of-bounds reads in userauth.c -- wincng.c: explicitly ignore BCrypt*AlgorithmProvider return codes - - Fixes VS2012 code analysis warning C6031: - return value ignored: could return unexpected value +- Use LIBSSH2_ERROR_BUFFER_TOO_SMALL instead of LIBSSH2_ERROR_OUT_OF_BOUNDARY in sftp.c -- wincng.c: fix possible invalid memory write access - - Fixes VS2012 code analysis warning C6386: - buffer overrun: accessing 'pbOutput', the writable size is - 'cbOutput' bytes, but '3' bytes may be written: libssh2 wincng.c 610 +- Additional bounds checking in sftp.c -- tests on Windows: check for WSAStartup return code - - Fixes VS2012 code analysis warning C6031: - return value ignored: could return unexpected value +- Additional length checks to prevent out-of-bounds reads and writes in _libssh2_packet_add(). https://libssh2.org/CVE-2019-3862.html -- wincng.c: fix possible NULL pointer de-reference of bignum - - Fixes VS2012 code analysis warning C6011: - dereferencing NULL pointer 'bignum'. libssh2 wincng.c 1567 +- Add a required_size parameter to sftp_packet_require et. al. to require callers of these functions to handle packets that are too short. https://libssh2.org/CVE-2019-3860.html -- wincng.c: fix possible use of uninitialized memory - - Fixes VS2012 code analysis warning C6001: - using uninitialized memory 'cbDecoded'. libssh2 wincng.c 553 +- Check the length of data passed to sftp_packet_add() to prevent out-of-bounds reads. 
-- packet.c: fix possible NULL pointer de-reference within listen_state - - Fixes VS2012 code analysis warning C6011: - dereferencing NULL pointer 'listen_state->channel'. libssh2 packet.c 221 +- Prevent zero-byte allocation in sftp_packet_read() which could lead to an out-of-bounds read. https://libssh2.org/CVE-2019-3858.html -- kex.c: fix possible NULL pointer de-reference with session->kex +- Sanitize padding_length - _libssh2_transport_read(). https://libssh2.org/CVE-2019-3861.html - Fixes VS2012 code analysis warning C6011: - dereferencing NULL pointer 'session->kex'. libssh2 kex.c 1761 + This prevents an underflow resulting in a potential out-of-bounds read if a server sends a too-large padding_length, possibly with malicious intent. -- agent.c: check return code of MapViewOfFile +- Defend against writing beyond the end of the payload in _libssh2_transport_read(). + +- Defend against possible integer overflows in comp_method_zlib_decomp. + +GitHub (14 Mar 2019) +- [Will Cosgrove brought this change] + + Security fixes (#315) - Fixes VS2012 code analysis warning C6387: 'p+4' may be '0': - this does not adhere to the specification for the function - 'memcpy': libssh2 agent.c 330 + * Bounds checks - Fixes VS2012 code analysis warning C6387: 'p' may be '0': - this does not adhere to the specification for the function - 'UnmapViewOfFile': libssh2 agent.c 333 - -- examples on Windows: check for socket return code + Fixes for CVEs + https://www.libssh2.org/CVE-2019-3863.html + https://www.libssh2.org/CVE-2019-3856.html - Fixes VS2012 code analysis warning C28193: - The variable holds a value that must be examined - -- examples on Windows: check for WSAStartup return code + * Packet length bounds check - Fixes VS2012 code analysis warning C6031: - return value ignored: could return unexpected value - -Guenter Knauf (11 Dec 2014) -- wincng.c: silent some more gcc compiler warnings. 
+ CVE + https://www.libssh2.org/CVE-2019-3855.html + + * Response length check + + CVE + https://www.libssh2.org/CVE-2019-3859.html + + * Bounds check + + CVE + https://www.libssh2.org/CVE-2019-3857.html + + * Bounds checking + + CVE + https://www.libssh2.org/CVE-2019-3859.html + + and additional data validation + + * Check bounds before reading into buffers + + * Bounds checking + + CVE + https://www.libssh2.org/CVE-2019-3859.html + + * declare SIZE_MAX and UINT_MAX if needed -- wincng.c: silent gcc compiler warnings. +- [Will Cosgrove brought this change] -- Watcom build: added support for WinCNG build. + fixed type warnings (#309) -- build: updated dependencies in makefiles. +- [Will Cosgrove brought this change] -Daniel Stenberg (4 Dec 2014) -- configure: change LIBS not LDFLAGS when checking for libs - - Closes #289 - - Patch-by: maurerpe + Bumping version number for pending 1.8.1 release -Guenter Knauf (3 Dec 2014) -- MinGW build: some more GNUMakefile tweaks. - - test/GNUmakefile: added architecture autodetection; added switches to - CFLAGS and RCFLAGS to make sure that the right architecture is used. - Added support to build with WinCNG. +Will Cosgrove (4 Mar 2019) +- [Daniel Stenberg brought this change] -- sftpdir.c: added authentication method detection. + _libssh2_string_buf_free: use correct free (#304) - Stuff copied over from ssh2.c to make testing a bit easier. - -- NMake build: fixed LIBS settings. + Use LIBSSH2_FREE() here, not free(). We allow memory function + replacements so free() is rarely the right choice... -- NMake build: added support for WinCNG build. +GitHub (26 Feb 2019) +- [Will Cosgrove brought this change] -- MinGW build: some GNUMakefile tweaks. + Fix for building against libreSSL #302 - Added architecture autodetection; added switches to CFLAGS and - RCFLAGS to make sure that the right architecture is used. - Added support to build with WinCNG. + Changed to use the check we use elsewhere. -- MinGW build: Fixed redefine warnings. 
+- [Will Cosgrove brought this change] -- Updated copyright year. + Fix for when building against LibreSSL #302 -Daniel Stenberg (31 Aug 2014) -- COPYING: bump the copyright year +Will Cosgrove (25 Feb 2019) +- [gartens brought this change] -Dan Fandrich (28 Jul 2014) -- docs: fixed a bunch of typos + docs: update libssh2_hostkey_hash.3 [ci skip] (#301) -- docs: added missing libssh2_session_handshake.3 file +GitHub (21 Feb 2019) +- [Will Cosgrove brought this change] -Marc Hoersken (19 May 2014) -- wincng.c: specify the required libraries for dependencies using MSVC - - Initially reported by Bob Kast as "for MS VS builds, specify the - libraries that are required so they don't need to go into all - project files that may use this library". Thanks a lot. + fix malloc/free mismatches #296 (#297) -- [Bob Kast brought this change] +- [Will Cosgrove brought this change] - windows build: do not export externals from static library - - If you are building a DLL, then you need to explicitly export each - entry point. When building a static library, you should not. - - libssh2 was exporting the entry points whether it was building a DLL or a - static library. To elaborate further, if libssh2 was used as a static - library, which was being linked into a DLL, the libssh2 API would be - exported from that separate DLL. + Replaced malloc with calloc #295 -Daniel Stenberg (19 May 2014) -- [Mikhail Gusarov brought this change] +- [Will Cosgrove brought this change] - Fix typos in manpages + Abstracted OpenSSL calls out of hostkey.c (#294) -Marc Hoersken (18 May 2014) -- wincng.c: Fixed memory leak in case of an error during ASN.1 decoding +- [Will Cosgrove brought this change] -- configure: Display individual crypto backends on separate lines + Fix memory dealloc impedance mis-match #292 (#293) - This avoids line-wrapping in between parameters and makes the - error message look like the following: - - configure: error: No crypto library found! 
- Try --with-libssl-prefix=PATH - or --with-libgcrypt-prefix=PATH - or --with-wincng on Windows + When using ed25519 host keys and a custom memory allocator. -- [Bob Kast brought this change] +- [Will Cosgrove brought this change] - libssh2_priv.h: a 1 bit bit-field should be unsigned + Added call to OpenSSL_add_all_digests() #288 - some compilers may not like this - -- knownhost.c: Fixed warning that pointer targets differ in signedness + For OpenSSL 1.0.x we need to call OpenSSL_add_all_digests(). -- wincng.c: Fixed warning about pointer targets differing in signedness +Will Cosgrove (12 Feb 2019) +- [Zhen-Huan HWANG brought this change] -- tcpip-forward.c: Fixed warning that pointer targets differ in signedness + SFTP: increase maximum packet size to 256K (#268) - libssh2_channel_forward_listen_ex uses ints instead of unsigned ints. + to match implementations like OpenSSH. -- misc.c: Fixed warning about mixed declarations and code +- [Zenju brought this change] -- libgcrypt.h: Fixed warning about pointer targets differing in signedness + Fix https://github.com/libssh2/libssh2/pull/271 (#284) -- wincng.h: Fixed warning about pointer targets differing in signedness +GitHub (16 Jan 2019) +- [Will Cosgrove brought this change] -- misc.c: Fixed warning about unused parameter abstract + Agent NULL check in shutdown #281 -- tcpip-forward.c: Removed unused variables shost, sport and sockopt +Will Cosgrove (15 Jan 2019) +- [Adrian Moran brought this change] -- wincng.h: Added forward declarations for all WinCNG functions + mbedtls: Fix leak of 12 bytes by each key exchange. (#280) - Initially reported by Bob Kast as "Wincng - define function - prototypes for wincng routines". Thanks a lot. + Correctly free ducts by calling _libssh2_mbedtls_bignum_free() in dtor. 
+ +- [alex-weaver brought this change] + + Fix error compiling on Win32 with STDCALL=ON (#275) + +GitHub (8 Nov 2018) +- [Will Cosgrove brought this change] + + Allow default permissions to be used in sftp_mkdir (#271) - Also replaced structure definitions with type definitions. + Added constant LIBSSH2_SFTP_DEFAULT_MODE to use the server default permissions when making a new directory -- [Bob Kast brought this change] +Will Cosgrove (13 Sep 2018) +- [Giulio Benetti brought this change] - libssh2.h: on Windows, a socket is of type SOCKET, not int + openssl: fix dereferencing ambiguity potentially causing build failure (#267) + + When dereferencing from *aes_ctr_cipher, being a pointer itself, + ambiguity can occur; fixed possible build errors. -- win32: Added WinCNG targets to generated Visual Studio project +Viktor Szakats (12 Sep 2018) +- win32/GNUmakefile: define HAVE_WINDOWS_H - Inspired by Bob Kast's reports, this commit enables the compilation - of libssh2 with WinCNG using the generated Visual Studio project files. - This commit adds WinCNG support to parts of the existing Win32 build - infrastructure, until new build systems, like pre-defined VS project - files or CMake files may be added. + This macro was only used in test/example code before, now it is + also used in library code, but only defined automatically by + automake/cmake, so let's do the same for the standalone win32 + make file. - This commit and b20bfeb3e519119a48509a1099c06d65aa7da1d7 raise one - question: How to handle build systems, like VS project files, that - need to include all source files regardless of the desired target, - including all supported crypto backends? For now the mentioned commit - added a check for LIBSSH2_OPENSSL to openssl.c and with this commit - the supported crypto backends are hardcoded within Makefile.am. + It'd be probably better to just rely on the built-in _WIN32 macro + to detect the presence of windows.h though. 
It's already used + in most of libssh2 library code. There is a 3rd, similar macro + named LIBSSH2_WIN32, which might also be replaced with _WIN32. + + Ref: https://github.com/libssh2/libssh2/commit/8b870ad771cbd9cd29edbb3dbb0878e950f868ab + Closes https://github.com/libssh2/libssh2/pull/266 -- libssh2_priv msvc: Removed redundant definition of inline keyword +Marc Hoersken (2 Sep 2018) +- Fix conditional check for HAVE_DECL_SECUREZEROMEMORY - Initially reported by Bob Kast as "Remove redundant 'inline' define". - Thanks a lot. + "Unlike the other `AC_CHECK_*S' macros, when a symbol is not declared, + HAVE_DECL_symbol is defined to `0' instead of leaving HAVE_DECL_symbol + undeclared. When you are sure that the check was performed, + use HAVE_DECL_symbol in #if." + + Source: autoconf documentation for AC_CHECK_DECLS. -- wincng: Made data parameter to hash update function constant +- Fix implicit declaration of function 'SecureZeroMemory' - Initially reported by Bob Kast as "formal parameter must be const - since it is used in contexts where the actual parameter may be const". - Thanks a lot. + Include window.h in order to use SecureZeroMemory on Windows. -- wincng: fix cross-compilation against the w64 mingw-runtime package +- Fix implicit declaration of function 'free' by including stdlib.h -- openssl: Check for LIBSSH2_OPENSSL in order to compile with openssl +GitHub (27 Aug 2018) +- [Will Cosgrove brought this change] -- wincng: Fixed use of possible uninitialized variable pPaddingInfo + Use malloc abstraction function in pem parse - Reported by Bob Kast, thanks a lot. + Fix warning on WinCNG build. 
-- wincng: Added cast for double to unsigned long conversion +- [Will Cosgrove brought this change] -- wincng: Cleaned up includes and check NTSTATUS using macro + Fixed possible junk memory read in sftp_stat #258 + +- [Will Cosgrove brought this change] + + removed INT64_C define (#260) - Removed header file combination that is not supported on a real - Windows platform and can only be compiled using MinGW. Replaced - custom NTSTATUS return code checks with BCRYPT_SUCCESS macro. + No longer used. -Daniel Stenberg (16 Mar 2014) -- userauth_hostbased_fromfile: zero assign to avoid uninitialized use +- [Will Cosgrove brought this change] + + Added conditional around engine.h include + +Will Cosgrove (6 Aug 2018) +- [Alex Crichton brought this change] + + Fix OpenSSL link error with `no-engine` support (#259) - Detected by clang-analyze + This commit fixes linking against an OpenSSL library that was compiled with + `no-engine` support by bypassing the initialization routines as they won't be + available anyway. -- channel_receive_window_adjust: store windows size always +GitHub (2 Aug 2018) +- [Will Cosgrove brought this change] + + ED25519 Key Support #39 (#248) - Avoid it sometimes returning without storing it, leaving calling - functions with unknown content! + OpenSSH Key and ED25519 support #39 + Added _libssh2_explicit_zero() to explicitly zero sensitive data in memory #120 - Detected by clang-analyzer + * ED25519 Key file support - Requires OpenSSL 1.1.1 or later + * OpenSSH Key format reading support - Supports RSA/DSA/ECDSA/ED25519 types + * New string buffer reading functions - These add build-in bounds checking and convenance methods. Used for OpenSSL PEM file reading. 
+ * Added new tests for OpenSSH formatted Keys -- publickey_packet_receive: avoid junk in returned pointers +- [Will Cosgrove brought this change] + + ECDSA key types are now explicit (#251) - clang-analyzer found this risk it would return a non-initialized pointer - in a success case + * ECDSA key types are now explicit + + Issue was brough up in pull request #248 -Peter Stuge (16 Mar 2014) -- [Marc Hoersken brought this change] +Will Cosgrove (2 May 2018) +- [Jakob Egger brought this change] - Added Windows Cryptography API: Next Generation based backend + Add Instructions for building from Master (#249) -- [Marc Hoersken brought this change] +GitHub (27 Apr 2018) +- [Will Cosgrove brought this change] - knownhost.c: fixed that 'key_type_len' may be used uninitialized + Initialize sb_intl #226 + +Will Cosgrove (19 Apr 2018) +- [doublex brought this change] + + buffer overflow (valgrind) (#159) + +- [Brendan Shanks brought this change] + + mbedTLS: Remove some C99-style intermingled variable declarations (#196) + +GitHub (18 Apr 2018) +- [Will Cosgrove brought this change] + + fix for #160 + +Will Cosgrove (18 Apr 2018) +- [doublex brought this change] + + fix memory leak when using mbedtls backend (#158) - ../src/knownhost.c: In function 'libssh2_knownhost_readline': - ../src/knownhost.c:651:16: warning: 'key_type_len' may be used - uninitialized in this function [-Wmaybe-uninitialized] - rc = knownhost_add(hosts, hostbuf, NULL, - ^ - ../src/knownhost.c:745:12: note: 'key_type_len' was declared here - size_t key_type_len; - ^ + _libssh2_bn_init_from_bin/_libssh2_bn_free would leak bignum from mbedtls_calloc(). 
-- [Marc Hoersken brought this change] +- [Brendan Shanks brought this change] - pem.c: always compile pem.c independently of crypto backend + mbedTLS: Avoid multiple definition errors for context handles (#197) -- Fix non-autotools builds: Always define the LIBSSH2_OPENSSL CPP macro +- [Tseng Jun brought this change] + + Fix the EVP cipher meth memory leakage problem (#244) - Commit d512b25f69a1b6778881f6b4b5ff9cfc6023be42 introduced a crypto - library abstraction in the autotools build system, to allow us to more - easily support new crypto libraries. In that process it was found that - all other build system which we support are hard-coded to build with - OpenSSL. Commit f5c1a0d98bd51aeb24aca3d49c7c81dcf8bd858d fixes automake - introduced into non-autotools build systems but still overlooked the - CPP macro saying that we are using OpenSSL. + * Fix the EVP cipher meth memory leakage problem - Thanks to Marc Hörsken for identifying this issue and proposing a fix - for win32/{GNUmakefile,config.mk}. This commit uses a slightly different - approach but the end result is the same. + Looks good, thanks for the fixes. -Dan Fandrich (15 Mar 2014) -- channel_close: Close the channel even in the case of errors +Marc Hörsken (31 Mar 2018) +- [Will Cosgrove brought this change] -- sftp_close_handle: ensure the handle is always closed + Added ECDSA defines for WinCNG (#245) - Errors are reported on return, but otherwise the close path is - completed as much as possible and the handle is freed on exit. + Fixed missing defines preventing building using WinCNG -Alexander Lamaison (6 Mar 2014) -- knownhost: Restore behaviour of `libssh2_knownhost_writeline` with short buffer. +GitHub (30 Mar 2018) +- [Will Cosgrove brought this change] + + Fix for _libssh2_rsa_new with OpenSSL 1.0.x - Commit 85c6627c changed the behaviour of `libssh2_knownhost_writeline` so that it stopped returning the number of bytes needed when the given buffer was too small. 
Also, the function changed such that is might write to part of the buffer before realising it is too small. + missing d value assignment. + +Will Cosgrove (20 Mar 2018) +- [Etienne Samson brought this change] + + A collection of small fixes (#198) - This commit restores the original behaviour, whilst keeping the unknown-key-type functionality that 85c6627c. Instead of writing to the buffer piecemeal, the length of the various parts is calculated up front and the buffer written only if there is enough space. The calculated necessary size is output in `outlen` regardless of whether the buffer was written to. + * tests: Remove if-pyramids - The main use-case for the original behaviour that this commit restores is to allow passing in a NULL buffer to get the actual buffer size needed, before calling the function again with the buffer allocated to the exact size required. - -- knownhost: Fix DSS keys being detected as unknown. + * tests: Switch run_command arguments - I missing `else` meant ssh-dss format keys were being re-detected as unknown format. + * tests: Make run_command a vararg function + + * tests: Xcode doesn't obey CMake's test working directory + + * openssl: move manual AES-CTR cipher into crypto init + + * cmake: Move our include dir before all other include paths -Dan Fandrich (6 Mar 2014) -- knownhosts: Abort if the hosts buffer is too small +GitHub (15 Mar 2018) +- [Will Cosgrove brought this change] + + Fixes incorrect indexing of KEX prefs string - This could otherwise cause a match on the wrong host + After stripping out an invalid KEX pref entry, it would incorrectly advance again leaving invalid values in the list. 
-- agent_list_identities: Fixed memory leak on OOM +Viktor Szakats (13 Mar 2018) +- tests: fix checksrc warnings + + Also: + * add 'static' qualifier to file-wide const buffers + * fix a non-ANSI C89 comment + * silence a mismatched fprintf() mask warning by adding a cast -- Fixed a few typos +- cmake: recognize OpenSSL 1.1 .dll names + + Also fix some comment typos and a stray tab. -- userauth: Fixed an attempt to free from stack on error +- docs: update an URL [ci skip] -- Fixed a few memory leaks in error paths +Daniel Stenberg (12 Mar 2018) +- docs/SECURITY: the max embargo is 14 days now -- Fixed two potential use-after-frees of the payload buffer - - The first might occur if _libssh2_packet_add returns an error, as - fullpacket_state wasn't reset to idle so if it were possible for - fullpacket to be called again, it would return to the same state - handler and re-use the freed p->packet buffer. +Viktor Szakats (12 Mar 2018) +- docs: spelling fixes [ci skip] - The second could occur if decrypt returned an error, as it freed the - packet buffer but did not clear total_num, meaning that freed buffer - could be written into again later. + Closes https://github.com/libssh2/libssh2/pull/222 -Alexander Lamaison (28 Nov 2013) -- Fix missing `_libssh2_error` in `_libssh2_channel_write`. - - In one case, the error code from `_libssh2_transport_read` was being returned from `_libssh2_channel_write` without setting it as the last error by calling `_libssh2_error`. This commit fixes that. +GitHub (12 Mar 2018) +- [Will Cosgrove brought this change] + + Fixed minor tabs/spacing issues + +- [Will Cosgrove brought this change] + + Update kex.c + +- [Will Cosgrove brought this change] + + Added basic bounds checking #206 - Found when using a session whose socket had been inadvertently destroyed. The calling code got confused because via `libssh2_session_last_error` it appeared no error had occurred, despite one being returned from the previous function. 
+ Basic bounds checking in ecdh_sha2_nistp() -Kamil Dudka (21 Nov 2013) -- [Mark McPherson brought this change] +- [Will Cosgrove brought this change] - openssl: initialise the digest context before calling EVP_DigestInit() + Fixed Clang warning #206 - When using the OpenSSL libraries in FIPS mode, the function call - EVP_DigestInit() is actually #defined to FIPS_digestinit(). - Unfortunately wheres EVP_DigestInit() initialises the context and then - calls EVP_DigestInit_ex(), this function assumes that the context has - been pre-initialised and crashes when it isn't. + Fixed possible garbage value for secret in an error case + +- [Will Cosgrove brought this change] + + Fixed incorrect #if to #ifdef #206 - Bug: https://trac.libssh2.org/ticket/279 + When checking HAVE_OPAQUE_STRUCTS. + +Viktor Szakats (12 Mar 2018) +- src: suppress two checksrc warnings - Fixes #279 + Ref: https://github.com/libssh2/libssh2/pull/235 -- [Marc Hörsken brought this change] +- src: address fopen() warnings, add missing copyright headers + + Ref: https://github.com/libssh2/libssh2/pull/235 - .gitignore: Ignore files like src/libssh2_config.h.in~ +- src: replace sprintf() with snprintf() + + Ref: https://github.com/libssh2/libssh2/pull/235 -Peter Stuge (13 Nov 2013) -- Move automake conditionals added by commit d512b25f out of Makefile.inc +- src: fix checksrc warnings - Commit d512b25f69a1b6778881f6b4b5ff9cfc6023be42 added automake - conditionals to Makefile.inc but since Makefile.inc is included - from Makefile for all other build systems that does not work. + Use checksrc.pl from the curl project, with (for now) + suppressed long line warnings and indentation set to + 4 spaces. Fixes are whitespace for the most part. - This commit instead adds Makefile.OpenSSL.inc and Makefile.libgcrypt.inc - and moves the automake conditional to its proper place, src/Makefile.am. + Warning count went down from 2704 to 12. 
- The automake conditional includes the correct Makefile.$name.inc per - the crypto library selection/detection done by configure. + Also fix codespell typos, two non-ANSI C89 comments + and a stray tab in include/libssh2.h. - All non-autotools build system files in libssh2 are hardcoded to use - OpenSSL and do not get a conditional but at least there is some reuse - because they can all include the new Makefile.OpenSSL.inc. + Ref: https://github.com/libssh2/libssh2/pull/235 -Daniel Stenberg (27 Oct 2013) -- [Salvador Fandino brought this change] - - Set default window size to 2MB - - The default channel window size used until now was 256KB. This value is - too small and results on a bottleneck on real-life networks where - round-trip delays can easily reach 300ms. +- checksrc: add source style checker - The issue was not visible because the configured channel window size - was being ignored and a hard-coded value of ~22MB being used instead, - but that was fixed on a previous commit. - - This patch just changes the default window size - (LIBSSH2_CHANNEL_WINDOW_DEFAULT) to 2MB. It is the same value used by - OpenSSH and in our opinion represents a good compromise between memory - used and transfer speed. + This is a slightly extended version of this original source + from the curl project: + https://github.com/curl/curl/blob/8b754c430b9a4c51aa606c687ee5014faf7c7b06/lib/checksrc.pl - Performance tests were run to determine the optimum value. 
The details - and related discussion are available from the following thread on the - libssh2 mailing-list: + This version adds the following options to customize it for + libssh2 (plus some whitespace formatting): - http://www.libssh2.org/mail/libssh2-devel-archive-2013-10/0018.shtml - http://article.gmane.org/gmane.network.ssh.libssh2.devel/6543 + `-i` to override indentation spaces (2) + `-m` to override maximum line length (79) - An excerpt follows: + Command-line used to check libssh2 sources: - "I have been running some transfer test and measuring their speed. + $ ./checksrc.pl -i4 -m500 *.c *.h - My setup was composed of a quad-core Linux machine running Ubuntu 13.10 - x86_64 with a LXC container inside. The data transfers were performed - from the container to the host (never crossing through a physical - network device). + Closes https://github.com/libssh2/libssh2/pull/236 + +- src: add static qualifier - Network delays were simulated using the tc tool. And ping was used to - verify that they worked as intended during the tests. + To private, const strings. - The operation performed was the equivalent to the following ssh command: + Closes https://github.com/libssh2/libssh2/pull/237 + +- [Will Cosgrove brought this change] + + Add support for ECDSA keys and host keys (#41) - $ ssh container "dd bs=16K count=8K if=/dev/zero" >/dev/null + This commit lands full ECDSA key support when using the OpenSSL + backend. Which includes: - Though, establishment and closing of the SSH connection was excluded - from the timings. + New KEX methods: + ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, ecdsa-sha2-nistp521 - I run the tests several times transferring files of sizes up to 128MB - and the results were consistent between runs. + Can now read OpenSSL formatted ECDSA key files. - The results corresponding to the 128MB transfer are available here: + Now supports known host keys of type ecdsa-sha2-nistp256. 
- https://docs.google.com/spreadsheet/ccc?key=0Ao1yRmX6PQQzdG5wSFlrZl9HRWNET3ZyN0hnaGo5ZFE&usp=sharing + New curve types: + NID_X9_62_prime256v1, NID_secp384r1, NID_secp521r1 - It clearly shows that 256KB is too small as the default window size. - Moving to a 512MB generates a great improvement and after the 1MB mark - the returns rapidly diminish. Other factors (TCP window size, probably) - become more limiting than the channel window size + Default host key preferred ordering is now nistp256, nistp384, + nistp521, rsa, dss. - For comparison I also performed the same transfers using OpenSSH. Its - speed is usually on par with that of libssh2 using a window size of 1MB - (even if it uses a 2MB window, maybe it is less aggressive sending the - window adjust msgs)." + Ref: https://github.com/libssh2/libssh2/issues/41 - Signed-off-by: Salvador Fandino + Closes https://github.com/libssh2/libssh2/pull/206 -- [Salvador brought this change] +GitHub (15 Dec 2017) +- [Will Cosgrove brought this change] - _libssh2_channel_read: Honour window_size_initial - - _libssh2_channel_read was using an arbitrary hard-coded limit to trigger - the window adjusting code. The adjustment used was also hard-coded and - arbitrary, 15MB actually, which would limit the usability of libssh2 on - systems with little RAM. - - This patch, uses the window_size parameter passed to - libssh2_channel_open_ex (stored as remote.window_size_initial) plus the - buflen as the base for the trigger and the adjustment calculation. + Fixed possible crash when decoding invalid data - The memory usage when using the default window size is reduced from 22MB - to 256KB per channel (actually, if compression is used, these numbers - should be incremented by ~50% to account for the errors between the - decompressed packet sizes and the predicted sizes). + When trying to decode invalid data, it frees the buffer but doesn't nil it so the caller gets a junk memory pointer which they could potentially double free. 
+ +- [Will Cosgrove brought this change] + + Remove call to OpenSSL_add_all_ciphers() - My tests indicate that this change does not impact the performance of - transfers across localhost or a LAN, being it on par with that of - OpenSSH. On the other hand, it will probably slow down transfers on - networks with high bandwidth*delay when the default window size - (LIBSSH2_CHANNEL_WINDOW_DEFAULT=256KB) is used. + Now lives in libssh2 init() from PR #189 + +- [Will Cosgrove brought this change] + + Fixed incorrect reference to decrypted block - Signed-off-by: Salvador Fandino + Fixed incorrectly copied memory from p->buf into init instead of from the decrypted buffer block. The only reason this worked was because the crypt() function decrypts the value in-place and overwrites p->buf. I'm working on a fork that no longer does this and exposed this bug. -- [Salvador Fandino brought this change] +Will Cosgrove (20 Oct 2017) +- [Pan brought this change] - knownhosts: handle unknown key types + Fix typo in crypt.c (#218) + +Kamil Dudka (17 Oct 2017) +- session: avoid printing misleading debug messages - Store but don't use keys of unsupported types on the known_hosts file. + ... while throwing LIBSSH2_ERROR_EAGAIN out of session_startup() - Currently, when libssh2 parses a known_host file containing keys of some - type it doesn't natively support, it stops reading the file and returns - an error. + If the session runs in blocking mode, LIBSSH2_ERROR_EAGAIN never reaches + the libssh2 API boundary and, in non-blocking mode, these messages are + suppressed by the condition in _libssh2_error_flags() anyway. - That means, that the known_host file can not be safely shared with other - software supporting other key types (i.e. OpenSSH). + Closes #211 + +Viktor Szakats (15 Oct 2017) +- win32/GNUmakefile: allow customizing dll suffixes - This patch adds support for handling keys of unknown type. It can read - and write them, even if they are never going to be matched. 
+ - New `LIBSSH2_DLL_SUFFIX` envvar will add a suffix to the generated + libssh2 dll name. Useful to add `-x64` to 64-bit builds so that + it can live in the same directory as the 32-bit one. By default + this is empty. - At the source level the patch does the following things: + - New `LIBSSH2_DLL_A_SUFFIX` envvar to customize the suffix of the + generated import library (implib) for libssh2 .dll. It defaults + to `dll`, and it's useful to modify that to `.dll` to have the + standard naming scheme for mingw-built .dlls, i.e. `libssh2.dll.a`. - - add a new unknown key type LIBSSH2_KNOWNHOST_KEY_UNKNOWN + Ref: https://github.com/curl/curl/commit/aaa16f80256abc1463fd9374815130a165222257 - - add a new slot (key_type_name) on the known_host struct that is - used to store the key type in ascii form when it is not supported + Closes https://github.com/libssh2/libssh2/pull/215 + +- makefile.m32: allow to override gcc, ar and ranlib - - parse correctly known_hosts entries with unknown key types and - populate the key_type_name slot + Allow to ovverride certain build tools, making it possible to + use LLVM/Clang to build libssh2. The default behavior is unchanged. + To build with clang (as offered by MSYS2), these settings can + be used: - - print correctly known_hosts entries of unknown type + LIBSSH2_CC=clang + LIBSSH2_AR=llvm-ar + LIBSSH2_RANLIB=llvm-ranlib - - when checking a host key ignore keys that do not match the key + Also adjust ranlib parameters to be compatible with LLVM/Clang's + ranlib tool. - Fixes #276 + Closes https://github.com/libssh2/libssh2/pull/214 -- windows build: fix build errors +GitHub (27 Sep 2017) +- [Will Cosgrove brought this change] + + Fixes out of bounds memory access (#210) - Fixes various link errors with VS2010 + If an invalid PEM file is read and the lines are longer than 128 characters it will go out of bounds and crash on line 91. 
+ +Will Cosgrove (11 Sep 2017) +- [Kamil Dudka brought this change] + + scp: do not NUL-terminate the command for remote exec (#208) - Reported-by: "kdekker" - Fixes #272 + It breaks SCP download/upload from/to certain server implementations. + + The bug does not manifest with OpenSSH, which silently drops the NUL + byte (eventually with any garbage that follows the NUL byte) before + executing it. + + Bug: https://bugzilla.redhat.com/1489736 -- man page: add missing function argument +GitHub (21 Aug 2017) +- [Viktor Szakats brought this change] + + openssl.c: remove no longer used variable (#204) - for libssh2_userauth_publickey_fromfile_ex() + after e378d2e30a40bd9bcee06dc3a4250f269098e200 + +- [Will Cosgrove brought this change] + + Fix for #188 (#189) - Reported-by: "pastey" + * Update openssl.c - Fixes #262 + * Create openssl.h -- [Salvador brought this change] +Will Cosgrove (24 May 2017) +- [Marcel Raad brought this change] - Fix zlib deflate usage + openssl: fix build with OpenSSL 1.1 API (#176) - Deflate may return Z_OK even when not all data has been compressed - if the output buffer becomes full. + When building with OPENSSL_API_COMPAT=0x10100000L, OpenSSL_add_all_algorithms + and OpenSSL_add_all_ciphers don't exist. The corresponding functionality is + handled automatically with OpenSSL 1.1. + +- [Sune Bredahl brought this change] + + Add support for SHA256 hostkey fingerprints (#180) - In practice this is very unlikely to happen because the output buffer - size is always some KBs larger than the size of the data passed for - compression from the upper layers and I think that zlib never expands - the data so much, even on the worst cases. + Looks good, thanks! + +GitHub (12 May 2017) +- [Will Cosgrove brought this change] + + Fix memory leak of crypt_ctx->h using openSSL 1.1+ (#177) - Anyway, this patch plays on the safe side checking that the output - buffer is not exhausted. + Need to use EVP_CIPHER_CTX_free instead of EVP_CIPHER_CTX_reset. 
+ +Marc Hoersken (2 Mar 2017) +- tests/openssh_server/authorized_keys: add key_rsa_encrypted.pub + +- tests: add simple test for passphrase-protected PEM file support + +- os400qc3: enable passphrase-protected PEM file support using pem.c + +- pem: fix indentation and replace assert after 386e012292 + +- [Keno Fischer brought this change] + + pem: add passphrase-protected PEM file support for libgcrypt and wincng - Signed-off-by: Salvador + Since they use our own PEM parser which did not support encrypted + PEM files, trying to use such files on these backends failed. + Fix that by augmenting the PEM parser to support encrypted PEM files. -- [Salvador brought this change] +- [Thomas brought this change] - comp_method_zlib_decomp: Improve buffer growing algorithm + misc: use time constant implementation for AES CTR increment + +- [Thomas brought this change] + + wincng: add AES CTR mode support (aes128-ctr, aes192-ctr, aes256-ctr) + +- [Thomas brought this change] + + openssl: move shared AES-CTR code into misc + +Daniel Stenberg (20 Dec 2016) +- [Alex Crichton brought this change] + + kex: acknowledge error code from libssh2_dh_key_pair() - The old algorithm was O(N^2), causing lots and lots of reallocations - when highly compressed data was transferred. + Fixes a segfault using ssh-agent on Windows - This patch implements a simpler one that just doubles the buffer size - everytime it is exhausted. It results in O(N) complexity. + This commit fixes a segfault seen dereferencing a null pointer on + Windows when using ssh-agent. The problem ended up being that errors + weren't being communicated all the way through, causing null pointers to + be used when functions should have bailed out sooner. - Also a smaller inflate ratio is used to calculate the initial size (x4). 
+ The `_libssh2_dh_key_pair` function for WinCNG was modified to propagate + errors, and then the two callsites in kex.c of + `diffie_hellman_sha{1,256}` were updated to recognize this error and + bail out. - Signed-off-by: Salvador + Fixes #162 + Closes #163 -- [Salvador brought this change] +Alexander Lamaison (27 Nov 2016) +- [monnerat brought this change] - Fix zlib usage + Implement Diffie-Hellman computations in crypto backends. (#149) - Data may remain in zlib internal buffers when inflate() returns Z_OK - and avail_out == 0. In that case, inflate has to be called again. + Not all backends feature the low level API needed to compute a Diffie-Hellman + secret, but some of them directly implement Diffie-Hellman support with opaque + private data. The later approach is now generalized and backends are + responsible for all Diffie Hellman computations. + As a side effect, procedures/macros _libssh2_bn_rand and _libssh2_bn_mod_exp + are no longer needed outside the backends. + +Peter Stuge (16 Nov 2016) +- acinclude.m4: The mbedtls crypto backend actually requires libmbedcrypto - Also, once all the data has been inflated, it returns Z_BUF_ERROR to - signal that the input buffer has been exhausted. + Examples can't be linked with libmbedtls but need libmbedcrypto, and + any users of libssh2 which use libtool and libssh2.la would encounter + the same problem. - Until now, the way to detect that a packet payload had been completely - decompressed was to check that no data remained on the input buffer - but that didn't account for the case where data remained on the internal - zlib buffers. - - That resulted in packets not being completely decompressed and the - missing data reappearing on the next packet, though the bug was masked - by the buffer allocation algorithm most of the time and only manifested - when transferring highly compressible data. - - This patch fixes the zlib usage. 
+ This changes the mbedtls detection to search for libmbedcrypto, which + is the actual dependency for the backend. + +- acinclude.m4: Add CPPFLAGS=-I$prefix-dir/include in LIBSSH2_LIB_HAVE_LINKFLAGS - Signed-off-by: Salvador + This is absolutely neccessary for header files to be found when + AC_LIB_HAVE_LINKFLAGS searches for libraries. -- [Salvador brought this change] +- acinclude.m4: Make saved variables in LIBSSH2_LIB_HAVE_LINKFLAGS uniform - _libssh2_channel_read: fix data drop when out of window +- docs/HACKING.CRYPTO: Improve documentation for autoconf build system + +Alexander Lamaison (16 Nov 2016) +- [Alex Arslan brought this change] + + Check for netinet/in.h in the tests cmake file (#148) + +- [Patrick Monnerat brought this change] + + Define new Diffie-Hellman context for mbedTLS + +- [monnerat brought this change] + + Make libssh2 work again on os400. (#118) - After filling the read buffer with data from the read queue, when the - window size was too small, "libssh2_channel_receive_window_adjust" was - called to increase it. In non-blocking mode that function could return - EAGAIN and, in that case, the EAGAIN was propagated upwards and the data - already read on the buffer lost. + * os400: minimum supported OS version is now V6R1. + Do not log compiler informational messages. - The function was also moving between the two read states - "libssh2_NB_state_idle" and "libssh2_NB_state_created" both of which - behave in the same way (excepting a debug statment). + * Implement crypto backend specific Diffie-Hellman computation. - This commit modifies "_libssh2_channel_read" so that the - "libssh2_channel_receive_window_adjust" call is performed first (when - required) and if everything goes well, then it reads the data from the - queued packets into the read buffer. + This feature is now needed on os400 because the QC3 library does not + implement bn_mod_exp() natively. 
Up to now, this function was emulated using + an RSA encryption, but commits ca5222ea819cc5ed797860070b4c6c1aeeb28420 and + 7934c9ce2a029c43e3642a492d3b9e494d1542be (CVE-2016-0787) broke the emulation + because QC3 only supports RSA exponents up to 512 bits. - It also removes the useless "libssh2_NB_state_created" read state. + Happily, QC3 supports a native API for Diffie-Hellman computation, with + opaque random value: this commit implements the use of this API and, as a + side effect, enables support of this feature for any other crypto backend that + would use it. - Some rotted comments have also been updated. + A "generic" Diffie-Hellman computation internal API supports crypto backends + not implementing their own: this generic API uses the same functions as before. - Signed-off-by: Salvador + * Fix typos in docs/HACKING.CRYPTO. -- [Salvador Fandino brought this change] +- [Peter Stuge brought this change] - window_size: redid window handling for flow control reasons + acinclude.m4: Fixup OpenSSL EVP_aes_128_ctr() detection + +- [Peter Stuge brought this change] + + configure.ac: Add --with-crypto= instead of many different --with-$backend - Until now, the window size (channel->remote.window_size) was being - updated just after receiving the packet from the transport layer. + The new --with-crypto option replaces the previous backend-specific + --with-{openssl,libgcrypt,mbedtls,wincng} options and fixes some issues. - That behaviour is wrong because the channel queue may grow uncontrolled - when data arrives from the network faster that the upper layer consumes - it. + * libgcrypt or mbedtls would previously be used whenever found, even + if configure was passed --without-libgcrypt or --without-mbedtls. - This patch adds a new counter, read_avail, which keeps a count of the - bytes available from the packet queue for reading. Also, now the window - size is adjusted when the data is actually read by an upper layer. 
+ * If --with-$backend was specified then configure would not fail even + if that library could not be found, and would instead use whichever + crypto library was found first. - That way, if the upper layer stops reading data, the window will - eventually fill and the remote host will stop sending data. When the - upper layers reads enough data, a window adjust packet is delivered and - the transfer resumes. + The new option defaults to `auto`, which makes configure check for all + supported crypto libraries in turn, choosing the first one found, or + exiting with an error if none can be found. + +- [Tony Kelman brought this change] + + Build mbedtls from source on Travis (#133) - The read_avail counter is used to detect the situation when the remote - server tries to send data surpassing the window size. In that case, the - extra data is discarded. + * Revert "Revert "travis: Test mbedtls too"" - Signed-off-by: Salvador - -Peter Stuge (15 Sep 2013) -- configure.ac: Call zlib zlib and not libz in text but keep option names + This reverts commit c4c60eac5ca756333034b07dd9e0b97741493ed3. + + * travis: Build mbedtls from source on Travis + + Use TOOLCHAIN_OPTION when calling cmake on mbedtls + + * tests: only run DSA tests for non-mbedtls + + crypto backends -- configure.ac: Reorder --with-* options in --help output +- [Peter Stuge brought this change] -- configure.ac: Rework crypto library detection + configure.ac src/Makefile.am: Remove dead AM_CONDITIONAL(OS400QC3) - This further simplifies adding new crypto libraries. + According to os400/README400 this backend can not be built + with configure+make, and the conditional is hard coded to false. -- Clean up crypto library abstraction in build system and source code +- [Peter Stuge brought this change] + + configure.ac: Add -DNDEBUG to CPPFLAGS in non-debug builds - libssh2 used to explicitly check for libgcrypt and default to OpenSSL. + There are a few uses of assert() in channel.c, sftp.c and transport.c. 
+ +- [Peter Stuge brought this change] + + src/global.c: Fix conditional AES-CTR support - Now all possible crypto libraries are checked for explicitly, making - the addition of further crypto libraries both simpler and cleaner. + Most of libssh2 already has conditional support for AES-CTR according to + the LIBSSH2_AES_CTR crypto backend #define, but global.c needed fixing. -- configure.ac: Add zlib to Requires.private in libssh2.pc if using zlib +- [Peter Stuge brought this change] -- Revert "Added Windows Cryptography API: Next Generation based backend" + src/crypto.h src/userauth.c: Fix conditional RSA support - This reverts commit d385230e15715e67796f16f3e65fd899f21a638b. + Most of libssh2 already has conditional support for RSA according to + the LIBSSH2_RSA crypto backend #define, but crypto.h and userauth.c + needed a few small fixes. -Daniel Stenberg (7 Sep 2013) -- [Leif Salomonsson brought this change] +- [Peter Stuge brought this change] - sftp_statvfs: fix for servers not supporting statfvs extension + src/kex.c: Cast libssh2_sha{1,256}_update data arguments properly - Fixes issue arising when server does not support statfvs and or fstatvfs - extensions. sftp_statvfs() and sftp_fstatvfs() after this patch will - handle the case when SSH_FXP_STATUS is returned from server. + The update functions take a const unsigned char * but were called + with (const) char * in some places, causing unneccessary warnings. -- [Marc Hoersken brought this change] +- [Peter Stuge brought this change] - Added Windows Cryptography API: Next Generation based backend + docs/HACKING.CRYPTO: Fix two type typos -- [Kamil Dudka brought this change] +- [Sergei Trofimovich brought this change] - partially revert "window_size: explicit adjustments only" + acinclude.m4: fix ./configure --with-libgcrypt - This partially reverts commit 03ca9020756a4e16f0294e5b35e9826ee6af2364 - in order to fix extreme slowdown when uploading to localhost via SFTP. 
+ The change fixes passing of bogus gcrypt prefix. + Reproducible as: - I was able to repeat the issue on RHEL-7 on localhost only. It did not - occur when uploading via network and it did not occur on a RHEL-6 box - with the same version of libssh2. + $ ./configure --with-libgcrypt + $ make V=1 + ... + /bin/sh ../libtool --tag=CC --mode=link gcc -g -O2 -Iyes/include -version-info 1:1:0 -no-undefined -export-symbols-regex '^libssh2_.*' -lgcrypt -lz -Lyes/lib -o libssh2.la -rpath /usr/local/lib channel.lo comp.lo crypt.lo hostkey.lo kex.lo mac.lo misc.lo packet.lo publickey.lo scp.lo session.lo sftp.lo userauth.lo transport.lo version.lo knownhost.lo agent.lo libgcrypt.lo pem.lo keepalive.lo global.lo -lgcrypt + ../libtool: line 7475: cd: yes/lib: No such file or directory + libtool: error: cannot determine absolute directory name of 'yes/lib' - The problem was that sftp_read() used a read-ahead logic to figure out - the window_size, but sftp_packet_read() called indirectly from - sftp_write() did not use any read-ahead logic. - -- _libssh2_channel_write: client spins on write when window full + These + -Iyes/include + -Lyes/lib + come from libgcrypt code autodetection: + if test -n "$use_libgcrypt" && test "$use_libgcrypt" != "no"; then + LDFLAGS="$LDFLAGS -L$use_libgcrypt/lib" + CFLAGS="$CFLAGS -I$use_libgcrypt/include" - When there's no window to "write to", there's no point in waiting for - the socket to become writable since it most likely just will continue to - be. + I assume it's a typo to use yes/no flag as a prefix and changed + it to '$with_libgcrypt_prefix'. 
- Patch-by: ncm - Fixes #258 + Reported-by: Mikhail Pukhlikov + Signed-off-by: Sergei Trofimovich -- _libssh2_channel_forward_cancel: avoid memory leaks on error - - Fixes #257 +- [Zenju brought this change] -- _libssh2_packet_add: avoid using uninitialized memory - - In _libssh2_packet_add, called by _libssh2_packet_read, a call to - _libssh2_packet_send that is supposed to send a one-byte message - SSH_MSG_REQUEST_FAILURE would send an uninitialized byte upon re-entry - if its call to _send returns _EAGAIN. + libssh2_sftp_init hang: last error not set - Fixes #259 - -- _libssh2_channel_forward_cancel: accessed struct after free + The problem is that the original if statement simply returns NULL, but does not set the session last error code. The consequence is that libssh2_sftp_init() also returns NULL and libssh2_session_last_errno(sshSession) == LIBSSH2_ERROR_NONE. - ... and the assignment was pointless anyway since the struct was about - to be freed. Bug introduced in dde2b094. + In my test the LIBSSH2_ERROR_EAGAIN is coming from sftp.c row 337: + if(4 != sftp->partial_size_len) + /* we got a short read for the length part */ + return LIBSSH2_ERROR_EAGAIN; - Fixes #268 + with "partial_size_len == 0". Not sure if this is expected. -Peter Stuge (2 Jun 2013) -- [Marc Hoersken brought this change] +- [Aidan Hobson Sayers brought this change] - Fixed compilation using mingw-w64 + docs: correctly describe channel_wait_eof + + channel_wait_eof waits for channel->remote.eof, which is set on + receiving a `SSH_MSG_CHANNEL_EOF` message. This message is sent + when a party has no more data to send on a channel. 
-- [Marc Hoersken brought this change] +- [Zenju brought this change] - knownhost.c: use LIBSSH2_FREE macro instead of free + Fix MSVC 14 compilation warning (#92) - Use LIBSSH2_FREE instead of free since - _libssh2_base64_encode uses LIBSSH2_ALLOC + 1> sftp.c + 1>libssh2-files\src\sftp.c(3393): warning C4456: declaration of 'retcode' hides previous local declaration + 1> libssh2-files\src\sftp.c(3315): note: see declaration of 'retcode' -Daniel Stenberg (18 May 2013) -- [Matthias Kerestesch brought this change] +- [Salvador Fandino brought this change] - libssh2_agent_init: init ->fd to LIBSSH2_INVALID_SOCKET + LIBSSH2_ERROR_CHANNEL_WINDOW_FULL: add new error code - ... previously it was left at 0 which is a valid file descriptor! + In order to signal that the requested operation can not succeed + because the receiving window had been exhausted, the error code + LIBSSH2_ERROR_BUFFER_TOO_SMALL has been reused but I have found + that in certain context it may be ambigous. - Bug: https://trac.libssh2.org/ticket/265 + This patch introduces a new error code, + LIBSSH2_ERROR_CHANNEL_WINDOW_FULL, exclusive to signal that condition. + +- [Salvador Fandino brought this change] + + channel_wait_eof: handle receive window exhaustion - Fixes #265 + Until now, in blocking mode, if the remote receiving window is + exhausted this function hangs forever as data is not read and the + remote side just keeps waiting for the window to grow before sending + more data. + + This patch, makes this function check for that condition and abort + with an error when it happens. -- userauth_password: pass on the underlying error code +- [Salvador Fandino brought this change] + + channel_wait_closed: don't fail when unread data is queued - _libssh2_packet_requirev() may return different errors and we pass that - to the parent instead of rewriting it. 
+ This function was calling channel_wait_eof to ensure that the EOF + packet has already been received, but that function also checks that + the read data queue is empty before reporting the EOF. That caused + channel_wait_closed to fail with a LIBSSH2_ERROR_INVAL when some data + was queued even after a successful call to libssh2_channel_wait_eof. - Bug: http://libssh2.org/mail/libssh2-devel-archive-2013-04/0029.shtml - Reported by: Cosmin + This patch changes libssh2_channel_wait_closed to look directly into + channel->remote.eof so that both libssh2_channel_wait_eof and + libssh2_channel_wait_closed behave consistently. -Peter Stuge (9 May 2013) -- [Marc Hoersken brought this change] +- [Salvador Fandino brought this change] - libcrypt.c: Fix typo in _libssh2_rsa_sha1_sign() parameter type + channel_wait_eof: fix debug message -Kamil Dudka (4 May 2013) -- configure.ac: replace AM_CONFIG_HEADER with AC_CONFIG_HEADERS - - Reported by: Quintus - Bug: https://trac.libssh2.org/ticket/261 +Daniel Stenberg (25 Oct 2016) +- libssh2.h: start working on 1.8.1 -Guenter Knauf (12 Apr 2013) -- Fixed copyright string for NetWare build. +Version 1.8.0 (25 Oct 2016) -Daniel Stenberg (9 Apr 2013) -- [Richard W.M. Jones brought this change] +Daniel Stenberg (25 Oct 2016) +- RELEASE-NOTES: adjusted for 1.8.0 - sftp: Add support for fsync (OpenSSH extension). - - The new libssh2_sftp_fsync API causes data and metadata in the - currently open file to be committed to disk at the server. +Kamil Dudka (20 Oct 2016) +- Revert "aes: the init function fails when OpenSSL has AES support" - This is an OpenSSH extension to the SFTP protocol. See: + This partially reverts commit f4f2298ef3635acd031cc2ee0e71026cdcda5864 + because it caused the compatibility code to call initialization routines + redundantly, leading to memory leakage with OpenSSL 1.1 and broken curl + test-suite in Fedora: - https://bugzilla.mindrot.org/show_bug.cgi?id=1798 - -- [Richard W.M. 
Jones brought this change] - - sftp: statvfs: Along error path, reset the correct 'state' variable. + 88 bytes in 1 blocks are definitely lost in loss record 5 of 8 + at 0x4C2DB8D: malloc (vg_replace_malloc.c:299) + by 0x72C607D: CRYPTO_zalloc (mem.c:100) + by 0x72A2480: EVP_CIPHER_meth_new (cmeth_lib.c:18) + by 0x4E5A550: make_ctr_evp.isra.0 (openssl.c:407) + by 0x4E5A8E8: _libssh2_init_aes_ctr (openssl.c:471) + by 0x4E5BB5A: libssh2_init (global.c:49) -- [Richard W.M. Jones brought this change] +Daniel Stenberg (19 Oct 2016) +- [Charles Collicutt brought this change] - sftp: seek: Don't flush buffers on same offset + libssh2_wait_socket: Fix comparison with api_timeout to use milliseconds (#134) - Signed-off-by: Richard W.M. Jones - -Guenter Knauf (9 Feb 2013) -- Updated dependency libs. + Fixes #74 -- Fixed tool macro names. +- [Charles Collicutt brought this change] -Daniel Stenberg (29 Nov 2012) -- [Seth Willits brought this change] + Set err_msg on _libssh2_wait_socket errors (#135) - compiler warnings: typecast strlen in macros +- Revert "travis: Test mbedtls too" - ... in macro parameters to avoid compiler warnings about lost precision. + This reverts commit 3e6de50a24815e72ec5597947f1831f6083b7da8. - Several macros in libssh2.h call strlen and pass the result directly to - unsigned int parameters of other functions, which warns about precision - loss because strlen returns size_t which is unsigned long on at least - some platforms (such as OS X). The fix is to simply typecast the - strlen() result to unsigned int. + Travis doesn't seem to support the mbedtls-dev package -- libssh2.h: bump version to 1.4.4-DEV +- maketgz: support "only" to only update version number locally + + and fix the date output locale -Version 1.4.3 (27 Nov 2012) +- configure: make the --with-* options override the OpenSSL default + + ... previously it would default to OpenSSL even with the --with-[crypto] + options used unless you specifically disabled OpenSSL. 
Now, enabling another + backend will automatically disable OpenSSL if the other one is found. -Daniel Stenberg (27 Nov 2012) -- RELEASE-NOTES: fixed for 1.4.3 +- [Keno Fischer brought this change] -- sftp_read: return error if a too large package arrives + docs: Add documentation on new cmake/configure options -Peter Stuge (13 Nov 2012) -- Only define _libssh2_dsa_*() functions when building with DSA support +- [Keno Fischer brought this change] -Guenter Knauf (8 Nov 2012) -- Added .def file to output. + configure: Add support for building with mbedtls -Kamil Dudka (1 Nov 2012) -- libssh2_hostkey_hash.3: update the description of return value - - The function returns NULL also if the hash algorithm is not available. +- [wildart brought this change] -Guenter Knauf (24 Oct 2012) -- Fixed mode acciedently committed. + travis: Test mbedtls too -- Ignore generated file. +- [wildart brought this change] -- Added hack to make use of Makefile.inc. + crypto: add support for the mbedTLS backend - This should avoid further maintainance of the objects list. + Closes #132 -- Fixed MSVC NMakefile. - - Added missing source files; added resource for DLL. +- [wildart brought this change] -Kamil Dudka (22 Oct 2012) -- examples: use stderr for messages, stdout for data - - Reported by: Karel Srot - Bug: https://bugzilla.redhat.com/867462 + cmake: Add CLEAR_MEMORY option, analogously to that for autoconf -- openssl: do not leak memory when handling errors - - ,.. in aes_ctr_init(). Detected by Coverity. +- README.md: fix link typo -- channel: fix possible NULL dereference - - ... in libssh2_channel_get_exit_signal(). Detected by Coverity. +- README: markdown version to look nicer on github -- Revert "aes: the init function fails when OpenSSL has AES support" - - This partially reverts commit f4f2298ef3635acd031cc2ee0e71026cdcda5864. - - We need to use the EVP_aes_???_ctr() functions in FIPS mode. 
+Viktor Szakats (5 Sep 2016) +- [Taylor Holberton brought this change] -- crypt: use hard-wired cipher block sizes consistently + openssl: add OpenSSL 1.1.0 compatibility -- openssl: do not ignore failure of EVP_CipherInit() +Daniel Stenberg (4 Sep 2016) +- [Antenore Gatta brought this change] -- kex: do not ignore failure of libssh2_md5_init() + tests: HAVE_NETINET_IN_H was not defined correctly (#127) - The MD5 algorithm is disabled when running in FIPS mode. - -Daniel Stenberg (21 Aug 2012) -- [Peter Krempa brought this change] + Fixes #125 - known_hosts: Fail when parsing unknown keys in known_hosts file. - - libssh2_knownhost_readfile() silently ignored problems when reading keys - in unsupported formats from the known hosts file. When the file is - written again from the internal structures of libssh2 it gets truntcated - to the point where the first unknown key was located. - - * src/knownhost.c:libssh2_knownhost_readfile() - return error if key - parsing fails +- SECURITY: fix web site typo -- AUTHORS: synced with 42fec44c8a4 - - 31 recent authors added +- SECURITY: security process -- [Dave Hayden brought this change] +GitHub (14 Aug 2016) +- [Alexander Lamaison brought this change] - compression: add support for zlib@openssh.com + Basic dockerised test suite. - Add a "use_in_auth" flag to the LIBSSH2_COMP_METHOD struct and a - separate "zlib@openssh.com" method, along with checking session->state - for LIBSSH2_STATE_AUTHENTICATED. Appears to work on the OpenSSH servers - I've tried against, and it should work as before with normal zlib - compression. + This introduces a test suite for libssh2. It runs OpenSSH in a Docker + container because that works well on Windows (via docker-machine) as + well as Linux. Presumably it works on Mac too with docker-machine, but + I've not tested that. 
+ + Because the test suite is docker-machine aware, you can also run it + against a cloud provider, for more realistic network testing, by setting + your cloud provider as your active docker machine. The Appveyor CI setup + in this commit does that because Appveyor doesn't support docker + locally. -- [Dmitry Smirnov brought this change] +Kamil Dudka (3 Aug 2016) +- [Viktor Szakats brought this change] - configure: gcrypt doesn't come with pkg-config support - - ... so use plain old -lgcrypt to the linker to link with it. + misc.c: Delete unused static variables - Fixes #225 + Closes #114 -- sftp_read: Value stored to 'next' is never read - - Detected by clang-analyzer +Daniel Stenberg (9 Apr 2016) +- [Will Cosgrove brought this change] -- publickey_init: errors are negative, fix check + Merge pull request #103 from willco007/patch-2 - Detected by clang-analyzer. + Fix for security issue CVE-2016-0787 -- [Maxime Larocque brought this change] +Alexander Lamaison (2 Apr 2016) +- [Zenju brought this change] - session_free: wrong variable used for keeping state + Fix MSVC 14 compilation errors - If libssh2_session_free is called without the channel being freed - previously by libssh2_channel_free a memory leak could occur. + For _MSC_VER == 1900 these macros are not needed and create problems: - A mismatch of states variables in session_free() prevent the call to - libssh2_channel_free function. session->state member is used instead of - session->free_state. - It causes a leak of around 600 bytes on every connection on my systems - (Linux, x64 and PPC). - (Debugging done under contract for Accedian Networks) + 1>C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt\stdio.h(1925): warning C4005: 'snprintf': macro redefinition (compiling source file libssh2-files\src\mac.c) - Fixes #246 - -Guenter Knauf (29 Jun 2012) -- Small NetWare makefile tweak. 
+ 1> \win32\libssh2_config.h(27): note: see previous definition of 'snprintf' (compiling source file libssh2-files\src\mac.c) + + 1>C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt\stdio.h(1927): fatal error C1189: #error: Macro definition of snprintf conflicts with Standard Library function declaration (compiling source file libssh2-files\src\mac.c) -- Some small Win32 makefile fixes. +Daniel Stenberg (26 Mar 2016) +- [Brad Harder brought this change] -Daniel Stenberg (19 Jun 2012) -- libssh2_userauth_publickey_fromfile_ex.3: mention publickey == NULL + _libssh2_channel_open: speeling error fixed in channel error message -- comp_method_zlib_decomp: handle Z_BUF_ERROR when inflating - - When using libssh2 to perform an SFTP file transfer from the "JSCAPE MFT - Server" (http://www.jscape.com) the transfer failed. The default JSCAPE - configuration is to enforce zlib compression on SSH2 sessions so the - session was compressed. The relevant part of the debug trace contained: - - [libssh2] 1.052750 Transport: unhandled zlib error -5 - [libssh2] 1.052750 Failure Event: -29 - decompression failure - - The trace comes from comp_method_zlib_decomp() in comp.c. The "unhandled - zlib error -5" is the status returned from the zlib function - inflate(). The -5 status corresponds to "Z_BUF_ERROR". - - The inflate() function takes a pointer to a z_stream structure and - "inflates" (decompresses) as much as it can. The relevant fields of the - z_stream structure are: - - next_in - pointer to the input buffer containing compressed data - avail_in - the number of bytes available at next_in - next_out - pointer to the output buffer to be filled with uncompressed - data - avail_out - how much space available at next_out - - To decompress data you set up a z_stream struct with the relevant fields - filled in and pass it to inflate(). 
On return the fields will have been - updated so next_in and avail_in show how much compressed data is yet to - be processed and next_out and avail_out show how much space is left in - the output buffer. - - If the supplied output buffer is too small then on return there will be - compressed data yet to be processed (avail_in != 0) and inflate() will - return Z_OK. In this case the output buffer must be grown, avail_out - updated and inflate() called again. - - If the supplied output buffer was big enough then on return the - compressed data will have been exhausted (avail_in == 0) and inflate() - will return Z_OK, so the data has all been uncompressed. - - There is a corner case where inflate() makes no progress. That is, there - may be unprocessed compressed data and space available in the output - buffer and yet the function does nothing. In this case inflate() will - return Z_BUF_ERROR. From the zlib documentation and the source code it - is not clear under what circumstances this happens. It could be that it - needs to write multiple bytes (all in one go) from its internal state to - the output buffer before processing the next chunk of input but but - can't because there is not enough space (though my guesses as to the - cause are not really relevant). Recovery from Z_BUF_ERROR is pretty - simple - just grow the output buffer, update avail_out and call - inflate() again. +Alexander Lamaison (15 Mar 2016) +- Link with crypt32.lib on Windows. - The comp_method_zlib_decomp() function does not handle the case when - inflate() returns Z_BUF_ERROR. It treats it as a non-recoverable error - and basically aborts the session. + Makes linking with static OpenSSL work again. Although it's not + required for dynamic OpenSSL, it does no harm. - Fixes #240 + Fixes #98. -Guenter Knauf (12 Jun 2012) -- MinGW makefile tweaks. - - Use GNU tools when compiling on Linux. - Fixed dist and dev targets. +- [Craig A. Berry brought this change] -- NetWare makefile tweaks. 
+ Tweak VMS help file building. - Changed to use Windows commandline tools instead of - GNU tools when compiling on Windows. Fixed dist and - dev targets. Enabled nlmconv error for unresolved - symbols. + Primarily this is handling cases where top-level files moved into + the docs/ directory. I also corrected a typo and removed the + claim that libssh2 is public domain. -Daniel Stenberg (11 Jun 2012) -- Revert "config.rpath: generated file, no need to keep in git" - - This reverts commit 1ac7bd09cc685755577fb2c8829adcd081e7ab3c. - - This file still used by lib/*m4 functions so we need to keep the file - around. +- [Craig A. Berry brought this change] -- BINDINGS: added PySsh2, a Python-ctypes binding + Build with standard stat structure on VMS. + + This gets us large file support, is available on any VMS release + in the last decade and more, and gives stat other modern features + such as 64-bit ino_t. -Guenter Knauf (8 Jun 2012) -- Fixed MinGW debug build. +- [Craig A. Berry brought this change] -Daniel Stenberg (5 Jun 2012) -- BINDINGS: Added the Cocoa/Objective-C one - - ... and sorted the bindings after the languages, alphabetically + Update vms/libssh2_config.h. - Reported by: Mike Abdullah + VMS does have stdlib.h, gettimeofday(), and OpenSSL. The latter + is appropriate to hard-wire in the configuration because it's + installed by default as part of the base operating system and + there is currently no libgcrypt port. -- BINDINGS: document the bindings we know of +- [Craig A. Berry brought this change] -Guenter Knauf (4 Jun 2012) -- Fixed LIBSSH2_INT64_T_FORMAT macro. + VMS can't use %zd for off_t format. - Usually a format macro should hold the whole format, otherwise - it should be named a prefix. Also fixed usage of this macro in - scp.c for a signed var where it was used as prefix for unsigned. + %z is a C99-ism that VMS doesn't currently have; even though the + compiler is C99-compliant, the library isn't quite. 
The off_t used + for the st_size element of the stat can be 32-bit or 64-bit, so + detect what we've got and pick a format accordingly. -- Removed obsolete define from makefiles. +- [Craig A. Berry brought this change] -- Renamed NetWare makefiles. + Normalize line endings in libssh2_sftp_get_channel.3. + + Somehow it got Windows-style CRLF endings so convert to just LF, + for consistency as well as not to confuse tools that will regard + the \r as content (e.g. the OpenVMS help librarian). -- Renamed NetWare makefiles. +Dan Fandrich (29 Feb 2016) +- libgcrypt: Fixed a NULL pointer dereference on OOM -- Synced MinGW makefiles with 56c64a6..39e438f. - - Also synced MinGW test makefile with b092696..f8cb874. +Daniel Stenberg (24 Feb 2016) +- [Viktor Szakats brought this change] -Peter Stuge (30 May 2012) -- Revert "sftp: Don't send attrs.permissions on read-only SSH_FXP_OPEN" + url updates, HTTP => HTTPS - This reverts commit 04e79e0c798674a0796be8a55f63dd92e6877790. + Closes #87 -- sftp: Don't send attrs.permissions on read-only SSH_FXP_OPEN +Dan Fandrich (23 Feb 2016) +- RELEASE-NOTES: removed some duplicated names + +Version 1.7.0 (23 Feb 2016) + +Daniel Stenberg (23 Feb 2016) +- web: the site is now HTTPS + +- RELEASE-NOTES: 1.7.0 release + +- diffie_hellman_sha256: convert bytes to bits - This works around a protocol violation in the ProFTPD 1.3.4 mod_sftp - server, as reported by Will Cosgrove in: + As otherwise we get far too small numbers. - http://libssh2.org/mail/libssh2-devel-archive-2012-05/0079.shtml + Reported-by: Andreas Schneider - Based on a suggested fix by TJ Saunders in: - - http://libssh2.org/mail/libssh2-devel-archive-2012-05/0104.shtml + CVE-2016-0787 -Guenter Knauf (28 May 2012) -- Try to detect OpenSSL build type automatically. +Alexander Lamaison (18 Feb 2016) +- Allow CI failures with VS 2008 x64. - Also fixed recently added libgdi32 linkage which is only - required when OpenSSL libs are linked statically. 
+ Appveyor doesn't support this combination. -Daniel Stenberg (25 May 2012) -- config.rpath: generated file, no need to keep in git +Daniel Stenberg (16 Feb 2016) +- [Viktor Szakats brought this change] -Guenter Knauf (22 May 2012) -- Updated dependency libary versions. + GNUmakefile: list system libs after user libs + + Otherwise some referenced WinSock functions will fail to + resolve when linking against LibreSSL 2.3.x static libraries + with mingw. + + Closes #80 -Daniel Stenberg (18 May 2012) -- 1.4.3: towards the future +- [Viktor Szakats brought this change] -Version 1.4.2 (18 May 2012) + openssl: apply new HAVE_OPAQUE_STRUCTS macro + + Closes #81 -Daniel Stenberg (18 May 2012) -- RELEASE-NOTES: synced with 92a9f952794 +- [Viktor Szakats brought this change] -Alexander Lamaison (15 May 2012) -- win32/libssh2_config.h: Remove hardcoded #define LIBSSH2_HAVE_ZLIB. + openssl: fix LibreSSL support after OpenSSL 1.1.0-pre1/2 support + +Alexander Lamaison (14 Feb 2016) +- sftp.h: Fix non-C90 type. - Rationale: Everything else in this file states a fact about the win32 - platform that is unconditional for that platform. There is nothing - unconditional about the presence of zlib. It is neither included with - Windows nor with the platform SDK. Therefore, this is not an appropriate - place to assert its presence. Especially as, once asserted, it cannot be - overridden using a compiler flag. + uint64_t does not exist in C90. Use libssh2_uint64_t instead. + +- Exclude sshd tests from AppVeyor. - In contrast, if it is omitted, then it can easily be reasserted by adding - a compiler flag defining LIBSSH2_HAVE_ZLIB. + They fail complaining that sshd wasn't invoked with an absolute path. -Daniel Stenberg (14 May 2012) -- RELEASE-NOTES: synced with 69a3354467c +- Test on more versions of Visual Studio. -- _libssh2_packet_add: SSH_MSG_CHANNEL_REQUEST default to want_reply - - RFC4254 says the default 'want_reply' is TRUE but the code defaulted to - FALSE. Now changed. 
- - Fixes #233 +- Fix Appveyor builds. -- gettimeofday: no need for a replacement under cygwin - - Fixes #224 +Daniel Stenberg (14 Feb 2016) +- [Viktor Szakats brought this change] -Alexander Lamaison (13 May 2012) -- Prevent sftp_packet_read accessing freed memory. + openssl: add OpenSSL 1.1.0-pre3-dev compatibility - sftp_packet_add takes ownership of the packet passed to it and (now that we - handle zombies) might free the packet. sftp_packet_read uses the packet type - byte as its return code but by this point sftp_packet_add might have freed - it. This change fixes the problem by caching the packet type before calling - sftp_packet_add. + by using API instead of accessing an internal structure. - I don't understand why sftp_packet_read uses the packet type as its return - code. A future change might get rid of this entirely. + Closes #83 -Daniel Stenberg (12 May 2012) -- sftp_packet_flush: flush zombies too - - As this function is called when the SFTP session is closed, it needs to - also kill all zombies left in the SFTP session to avoid leaking memory - just in case some zombie would still be in there. +- RELEASE-NOTES: synced with 996b04ececdf -- sftp_packetlist_flush: zombies must not have responses already - - When flushing the packetlist, we must only add the request as a zombie - if no response has already been received. Otherwise we could wrongly - make it a zombie even though the response was already received and then - we'd get a zombie stuck there "forever"... +- include/libssh2.h: next version is 1.7.0 -- sftp_read: on EOF remove packet before flush - - Since the sftp_packetlist_flush() function will move all the existing - FXP_READ requests in this handle to the zombie list we must first remove - this just received packet as it is clearly not a zombie. 
+- configure: build "silent" if possible -- sftp_packet_require: sftp_packet_read() returning 0 is not an error - - Exactly as the comment in the code said, checking the return code from - sftp_packet_read() with <= was wrong and it should be < 0. With the new - filtering on incoming packets that are "zombies" we can now see this - getting zero returned. +- sftp: re-indented some minor stuff -- sftp_packetlist_flush: only make it zombie if it was sent +- [Jakob Egger brought this change] + + sftp.c: ensure minimum read packet size - The list of outgoing packets may also contain packets that never were - sent off and we better not make them zombies too. + For optimum performance we need to ensure we don't request tiny packets. -- [Alexander Lamaison brought this change] +- [Jakob Egger brought this change] - Mark outstanding read requests after EOF as zombies. + sftp.c: Explicit return values & sanity checks + +- [Jakob Egger brought this change] + + sftp.c: Check Read Packet File Offset - In order to be fast, sftp_read sends many read requests at once. With a small - file, this can mean that when EOF is received back, many of these requests are - still outstanding. Responses arriving after we close the file and abandon the - file handle are queued in the SFTP packet queue and never collected. This - causes transfer speed to drop as a progressively longer queue must be searched - for every packet. + This commit adds a simple check to see if the offset of the read + request matches the expected file offset. - This change introduces a zombie request-ID list in the SFTP session that is - used to recognise these outstanding requests and prevent them being added to - the queue. + We could try to recover, from this condition at some point in the future. + Right now it is better to return an error instead of corrupted data. 
-Peter Stuge (23 Apr 2012) -- [Rafael Kitover brought this change] +- [Jakob Egger brought this change] - Update win32/GNUmakefile to use OpenSSL 1.0.1a - - libcrypto on win32 now depends on gdi32.dll, so move the OpenSSL LDLIBS - block to before the compiler definitions, so that libcrypto gets added - first, and then add -lgdi32 into the following common LDLIBS for gcc. + sftp.c: Don't return EAGAIN if data was written to buffer -Guenter Knauf (23 Apr 2012) -- Changed 'Requires' to 'Requires.private'. - - Only static builds need to link against the crypto libs. +- [Jakob Egger brought this change] -- Fixed 'Requires:' names. + sftp.c: Send at least one read request before reading - The 'Requires:' line lists the names of the .pc files. - -- Added 'Requires:' line to libssh2.pc. + This commit ensures that we have sent at least one read request before + we try to read data in sftp_read(). - This is necessary so that other libs which lookup libssh2 info - via pkg-config can add the right crypto lib dependencies. + Otherwise sftp_read() would return 0 bytes (indicating EOF) if the + socket is not ready for writing. -- Updated dependency lib versions. +- [Jakob Egger brought this change] -Peter Stuge (18 Apr 2012) -- configure.ac: Add option to disable build of the example applications + sftp.c: stop reading when buffer is full - Examples are built by default. Any of the following options on the - configure command line will skip building them: + Since we can only store data from a single chunk in filep, + we have to stop receiving data as soon as the buffer is full. 
- --disable-examples-build - --enable-examples-build=no - --enable-examples-build=false + This addresses the following bug report: + https://github.com/libssh2/libssh2/issues/50 -- userauth.c: fread() from public key file to correctly detect any errors - - If the filename parameter for file_read_publickey() was the name of a - directory instead of a file then libssh2 would spin trying to fgetc() - from the FILE * for the opened directory when trying to determine the - length of the encoded public key, since fgetc() can't report errors. +Salvador Fandiño (21 Jan 2016) +- agent_disconnect_unix: unset the agent fd after closing it - Use fread() instead to correctly detect this error condition along - with many others. + "agent_disconnect_unix", called by "libssh2_agent_disconnect", was + leaving the file descriptor in the agent structure unchanged. Later, + "libssh2_agent_free" would call again "libssh2_agent_disconnect" under + the hood and it would try to close again the same file descriptor. In + most cases that resulted in just a harmless error, but it is also + possible that the file descriptor had been reused between the two + calls resulting in the closing of an unrelated file descriptor. - This fixes the problem reported in - http://www.libssh2.org/mail/libssh2-devel-archive-2012-04/0021.shtml + This patch sets agent->fd to LIBSSH2_INVALID_SOCKET avoiding that + issue. - Reported-by: Oleksiy Zagorskyi + Signed-off-by: Salvador Fandiño -- Return LIBSSH2_ERROR_SOCKET_DISCONNECT on EOF when reading banner +Daniel Stenberg (18 Jan 2016) +- [Patrick Monnerat brought this change] -Guenter Knauf (17 Apr 2012) -- Fixed copyright year. + os400qc3: support encrypted private keys + + PKCS#8 EncryptedPrivateKeyinfo structures are recognized and decoded to get + values accepted by the Qc3 crypto library.
+- [Patrick Monnerat brought this change] -Daniel Stenberg (6 Apr 2012) -- version: bump to 1.4.2 + os400qc3: New PKCS#5 decoder - We're on the 1.4.2 track now (at least) + The Qc3 library is not able to handle PKCS#8 EncryptedPrivateKeyInfo structures + by itself. It is only capable of decrypting the (encrypted) PrivateKeyInfo + part, providing a key encryption key and an encryption algorithm are given. + Since the encryption key and algorithm description part in a PKCS#8 + EncryptedPrivateKeyInfo is a PKCS#5 structure, such a decoder is needed to + get the derived key method and hash, as well as encryption algorith and + initialisation vector. -Version 1.4.1 (4 Apr 2012) +- [Patrick Monnerat brought this change] -Daniel Stenberg (4 Apr 2012) -- RELEASE-NOTES: updated for 1.4.1 release + os400qc3: force continuous update on non-final hash/hmac computation -- always do "forced" window updates - - When calling _libssh2_channel_receive_window_adjust() internally, we now - always use the 'force' option to prevent libssh2 to avoid sending the - update if the update isn't big enough. - - It isn't fully analyzed but we have seen corner cases which made a - necessary window update not get send due to this and then the other side - doesn't send data our side then sits waiting for forever. +- [Patrick Monnerat brought this change] -- channel_read: force window adjusts! - - if there's not enough room to receive the data that's being requested, - the window adjustment needs to be sent to the remote and thus the force - option has to be used. _libssh2_channel_receive_window_adjust() would - otherwise "queue" small window adjustments for a later packet but that - is really terribly for the small buffer read that for example is the - final little piece of a very large file as then there is no logical next - packet! 
+ os400qc3: Be sure hmac keys have a minimum length - Reported by: Armen Babakhanian - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0130.shtml + The Qc3 library requires a minimum key length depending on the target + hash algorithm. Append binary zeroes to the given key if not long enough. + This matches RFC 2104 specifications. -- [Paul Howarth brought this change] +- [Patrick Monnerat brought this change] - aes: the init function fails when OpenSSL has AES support - - The internal init function only worked fine when the configure script - didn't detect the OpenSSL AES_CTR function! + os400qc3: Slave descriptor for key encryption key - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0111.shtml - Reported by: Paul Howarth - -- [Matthew Booth brought this change] - - transport_send: Finish in-progress key exchange before sending data - - _libssh2_channel_write() first reads outstanding packets before writing - new data. If it reads a key exchange request, it will immediately start - key re-exchange, which will require sending a response. If the output - socket is full, this will result in a return from - _libssh2_transport_read() of LIBSSH2_ERROR_EAGAIN. In order not to block - a write because there is no data to read, this error is explicitly - ignored and the code continues marshalling a packet for sending. When it - is sent, the remote end immediately drops the connection because it was - expecting a continuation of the key exchange, but got a data packet. - - This change adds the same check for key exchange to - _libssh2_transport_send() that is in _libssh2_transport_read(). This - ensures that key exchange is completed before any data packet is sent. + The Qc3 library requires the key encryption key to exist as long as + the encrypted key is used. Its descriptor token is then kept as an + "encrypted key slave" for recursive release. 
-- channel_write: acknowledge transport errors - - When draining data off the socket with _libssh2_transport_read() (which - in turn has to be done so that we can be sure to have read any possible - window-increasing packets), this code previously ignored errors which - could lead to nasty loops. Now all error codes except EAGAIN will cause - the error to be returned at once. - - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0068.shtml - Reported by: Matthew Booth +- [Patrick Monnerat brought this change] -- [Steven Dake brought this change] + os400qc3.c: comment PEM/DER decoding - In examples/x11.c, Make sure sizeof passed to read operation is correct - - sizeof(buf) expands to 8 or 4 (since its a pointer). This variable may - have been static in the past, leading to this error. - - Signed-off-by: Steven Dake +- [Patrick Monnerat brought this change] -- [Steven Dake brought this change] + os400qc3.c: improve ASN.1 header byte checks - Fix suspicious sizeof usage in examples/x11.c - - In the x11 example, sizeof(buf) = 8UL (on x86_64), when this should - probably represent the buffer size available. I am not sure how to - test that this change is actually correct, however. - - Signed-off-by: Steven Dake +- [Patrick Monnerat brought this change] -- sftp_packet_read: follow-up fix for EAGAIN/window adjust - - The commit in 7194a9bd7ba45 wasn't complete. This change makes sure - variables are initialized properly before used in the EAGAIN and window - adjust cases. + os400qc3.c: improve OID matching -- sftp_packet_add: use named error code instead of number +- [Patrick Monnerat brought this change] -- sftp_packet_add: verify the packet before accepting it - - In order to bail out as quickly as possible when things are wrong and - out of sync, make sure the SFTP message is one we understand. 
+ os400: os400qc3.c: replace malloc by LIBSSH2_ALLOC or alloca where possible -- SFTP: preserve the original error code more - - Lots of places in the code translated the original error into the more - generic LIBSSH2_ERROR_SOCKET_TIMEOUT but this turns out to distort the - original error reason a lot and makes tracking down the real origin of a - problem really hard. This change makes the original error code be - preserved to a larger extent when return up to the parent function. +- [Patrick Monnerat brought this change] -- sftp_packet_read: adjust window size as necessary - - Commit 03ca9020756 tried to simplify the window sizing logic but broke - SFTP readdir as there was no window sizing code left there so large - directory listings no longer worked. - - This change introduces window sizing logic to the sftp_packet_read() - function so that it now tells the remote about the local size having a - window size that suffice when it is about to ask for directory data. - - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0069.shtml - Reported by: Eric + os400: asn1_new_from_bytes(): use data from a single element only -- [Steven Dake brought this change] +- [Patrick Monnerat brought this change] - Tell C compiler we don't care about return code of libssh2_init - - The call of libssh2_init returns a return code, but nothing could be done - within the _libssh2_init_if_needed execution path. - - Signed-off-by: Steven Dake + os400: fix an ILE/RPG prototype -- [Steven Dake brought this change] +- [Patrick Monnerat brought this change] - Add comment indicating a resource leak is not really a resource leak - - While possibly obvious to those investigating the code, coverity complains - about this out of scope leak. 
- - Signed-off-by: Steven Dake + os400: implement character encoding conversion support -- [Steven Dake brought this change] +- [Patrick Monnerat brought this change] - Use safer snprintf rather then sprintf in scp_send() + os400: do not miss some external prototypes - Signed-off-by: Steven Dake - -- [Steven Dake brought this change] + Build procedure extproto() did not strip braces from header files, thus + possibly prepended them to true prototypes. This prevented the prototype to + be recognized as such. + The solution implemented here is to map braces to semicolons, effectively + considering them as potential prototype delimiters. - Use safer snprintf rather then sprintf in scp_recv() - - While the buffer is indeed allocated to a safe length, better safe then sorry. - - Signed-off-by: Steven Dake +- [Patrick Monnerat brought this change] -- [Steven Dake brought this change] + os400: Really add specific README - use snprintf in knownhost_writeline() rather then sprintf - - Although the function checks the length, if the code was in error, there - could potentially be a buffer overrun with the use of sprintf. Instead replace - with snprintf. - - Signed-off-by: Steven Dake +- [Patrick Monnerat brought this change] -- [Steven Dake brought this change] + os400: Add specific README and include new files in dist tarball - Add tracing to print packets left on session at libssh2_session_free - - Signed-off-by: Steven Dake +- [Patrick Monnerat brought this change] -Peter Stuge (2 Mar 2012) -- Define and use LIBSSH2_INVALID_SOCKET instead of INVALID_SOCKET - - INVALID_SOCKET is a special value in Windows representing a - non-valid socket identifier. We were #defining this to -1 on - non-Windows platforms, causing unneccessary namespace pollution. - Let's have our own identifier instead. - - Thanks to Matt Lawson for pointing this out. 
+ os400: add compilation scripts -- nw/Makefile.netware: Fix project name typo to avoid needless confusion +- [Patrick Monnerat brought this change] -- example/x11: Set raw terminal mode manually instead of with cfmakeraw() - - OpenSolaris has no cfmakeraw() so to make the example more portable - we simply do the equivalent operations on struct termios ourselves. + os400: include files for ILE/RPG - Thanks to Tom Weber for reporting this problem, and finding a solution. + In addition, file os400/macros.h declares all procedures originally + defined as macros. It must not be used for real inclusion and is only + intended to be used as a `database' for macro wrapping procedures generation. -Daniel Stenberg (17 Feb 2012) -- sftp_write: cannot return acked data *and* EAGAIN - - Whenever we have acked data and is about to call a function that *MAY* - return EAGAIN we must return the number now and wait to get called - again. Our API only allows data *or* EAGAIN and we must never try to get - both. +- [Patrick Monnerat brought this change] -Peter Stuge (13 Feb 2012) -- example/x11: Build only when sys/un.h is found by configure - - The example can't be built on systems without AF_UNIX sockets. + os400: add supplementary header files/wrappers. Define configuration. -Daniel Stenberg (10 Feb 2012) -- [Alexander Lamaison brought this change] +- [Patrick Monnerat brought this change] - Simplified sftp_read. + Protect callback function calls from macro substitution - Removed the total_read variable that originally must have tracked how - much data had been written to the buffer. With non-blocking reads, we - must return straight away once we have read data into the buffer so this - variable served not purpose. + Some structure fields holding callback addresses have the same name as the + underlying system function (connect, send, recv). Set parentheses around + their reference to suppress a possible macro substitution. 
- I think it was still hanging around in case the initial processing of - 'leftover' data meant we wrote to the buffer but this case, like the - others, must return immediately. Now that it does, the last remaining - need for the variable is gone. + Use a macro for connect() on OS/400 to resolve a const/nonconst parameter + problem. -- [Alexander Lamaison brought this change] +- [Patrick Monnerat brought this change] - Cleaned up sftp_read and added more explanation. - - Replaced the gotos which were implementing the state machine with - a switch statement which makes the states more explicit. + Add interface for OS/400 crypto library QC3 -- sftp_read: avoid data *and* EAGAIN - - Whenever we have data and is about to call a function that *MAY* return - EAGAIN we must return the data now and wait to get called again. Our API - only allows data *or* EAGAIN and we must never try to get both. +- [Patrick Monnerat brought this change] -Peter Stuge (2 Feb 2012) -- Add a tcpip-forward example which demonstrates remote port forwarding + misc: include stdarg.h for debug code -- libssh2.h: Add missing prototype for libssh2_session_banner_set() +- [Patrick Monnerat brought this change] -- example/subsystem_netconf.c: Return error when read buffer is too small - - Also remove a little redundancy in the read loop condition. + Document crypto library interface -- example/subsystem_netconf.c: Add a missing newline in an error message +- [Patrick Monnerat brought this change] -- Fix undefined reference to _libssh_error in libgcrypt backend + Feature an optional crypto-specific macro to rsa sign a data fragment vector - Commit 209de22299b4b58e582891dfba70f57e1e0492db introduced a function - call to a non-existing function, and since then the libgcrypt backend - has not been buildable. 
- -Version 1.4.0 (31 Jan 2012) - -Daniel Stenberg (31 Jan 2012) -- RELEASE-NOTES: synced with 6bd584d29 for 1.4.0 - -- s/1.3.1/1.4.0 + OS/400 crypto library is unable to sign a precomputed SHA1 hash: however + it does support a procedure that hashes data fragments and rsa signs. + If defined, the new macro _libssh2_rsa_sha1_signv() implements this function + and disables use of _libssh2_rsa_sha1_sign(). - We're bumping the minor number + The function described above requires that the struct iovec unused slacks are + cleared: for this reason, macro libssh2_prepare_iovec() has been introduced. + It should be defined as empty for crypto backends that are not sensitive + to struct iovec unused slack values. -- [Jernej Kovacic brought this change] +- [Patrick Monnerat brought this change] - libssh2_session_supported_algs: fix compiler warning + Fold long lines in include files -- [Jernej Kovacic brought this change] +- [Viktor Szakats brought this change] - session_supported_algs docs: added an example + kex.c: fix indentation + + Closes #71 -- [Gellule Xg brought this change] +- [Viktor Szakats brought this change] - sftp-seek: clear EOF flag + add OpenSSL-1.1.0-pre2 compatibility - Set the EOF flag to False when calling seek64 to be able to get some - data back on a following read + Closes #70 -- [Peter Krempa brought this change] +- [Viktor Szakats brought this change] - userauth: Provide more informations if ssh pub key extraction fails + add OpenSSL 1.1.0-pre1 compatibility - If the function that extracts/computes the public key from a private key - fails the errors it reports were masked by the function calling it. This - patch modifies the key extraction function to return errors using - _libssh_error() function. The error messages are tweaked to contain - reference to the failed operaton in addition to the reason. - - * AUTHORS: - add my name - * libgcrypt.c: _libssh2_pub_priv_keyfile(): - return a more verbose - error using - _libssh2_error() func. 
- * openssl.c: - modify call graph of _libssh2_pub_priv_keyfile() to use - _libssh2_error for error reporting(); - * userauth.c: - tweak functions calling _libssh2_pub_priv_keyfile() not - to shadow error messages + * close https://github.com/libssh2/libssh2/issues/69 + * sync a declaration with the rest of similar ones + * handle EVP_MD_CTX_new() returning NULL with OpenSSL 1.1.0 + * fix potential memory leak with OpenSSL 1.1.0 in + _libssh2_*_init() functions, when EVP_MD_CTX_new() succeeds, + but EVP_DigestInit() fails. -- TODO: remove issues we (sort of) did already +Marc Hoersken (22 Dec 2015) +- wincng.c: fixed _libssh2_wincng_hash_final return value + + _libssh2_wincng_hash_final was returning the internal BCRYPT + status code instead of a valid libssh2 return value (0 or -1). + + This also means that _libssh2_wincng_hash never returned 0. -- ssh2_exec: skip error outputs for EAGAIN +- wincng.c: fixed possible memory leak in _libssh2_wincng_hash - Since the example uses non-blocking mode, it will just flood the output - with this "nonsense" error. + If _libssh2_wincng_hash_update failed _libssh2_wincng_hash_final + would never have been called before. + + Reported by Zenju. -Guenter Knauf (30 Nov 2011) -- Some NetWare makefile tweaks. +Kamil Dudka (15 Dec 2015) +- [Paul Howarth brought this change] -Daniel Stenberg (18 Nov 2011) -- LIBSSH2_SFTP_PACKET_MAXLEN: increase to 80000 + libssh2.pc.in: fix the output of pkg-config --libs - Some SFTP servers send SFTP packets larger than 40000. Since the limit - is only present to avoid insane sizes anyway, we can easily bump it. + ... such that it does not include LDFLAGS used to build libssh2 itself. + There was a similar fix in the curl project long time ago: - The define was formerly in the public header libssh2_sftp.h but served - no external purpose and was moved into the source dir. 
+ https://github.com/bagder/curl/commit/curl-7_19_7-56-g4c8adc8 - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-11/0004.shtml - Reported by: Michael Harris + Bug: https://bugzilla.redhat.com/1279966 + Signed-off-by: Kamil Dudka -Alexander Lamaison (18 Nov 2011) -- [Peter Krempa brought this change] +Marc Hoersken (6 Dec 2015) +- hostkey.c: align code path of ssh_rsa_init to ssh_dss_init - knownhost_check(): Don't dereference ext if NULL is passed +- hostkey.c: fix invalid memory access if libssh2_dsa_new fails - Documentation for libssh2_knownhost_checkp() and related functions - states that the last argument is filled with data if non-NULL. + Reported by dimmaq, fixes #66 + +Daniel Stenberg (3 Nov 2015) +- [Will Cosgrove brought this change] + + gcrypt: define libssh2_sha256_ctx - "knownhost if set to non-NULL, it must be a pointer to a 'struct - libssh2_knownhost' pointer that gets filled in to point to info about a - known host that matches or partially matches." + Looks like it didn't make it into the latest commit for whatever reason. - In this function ext is dereferenced even if set to NULL, causing - segfault in applications not needing the extra data. + Closes #58 -Daniel Stenberg (11 Nov 2011) -- [Peter Krempa brought this change] +- [Salvador Fandino brought this change] - knownhost_add: Avoid dereferencing uninitialized memory on error path. + libssh2_session_set_last_error: Add function - In function knownhost_add, memory is alocated for a new entry. If normal - alocation is used, memory is not initialized to 0 right after, but a - check is done to verify if correct key type is passed. This test is done - BEFORE setting the memory to null, and on the error path function - free_host() is called, that tries to dereference unititialized memory, - resulting into a glibc abort(). 
+ Net::SSH2, the Perl wrapping module for libssh2 implements several features* + on top of libssh2 that can fail and so need some mechanism to report the error + condition to the user. - * knownhost.c - knownhost_add(): - move typemask check before alloc - -- windows build: add define to avoid compiler warning + Until now, besides the error state maintained internally by libssh2, another + error state was maintained at the Perl level for every session object and then + additional logic was used to merge both error states. That is a maintenance + nighmare, and actually there is no way to do it correctly and consistently. - A recent mingw compiler has started to complain on "#warning Please - include winsock2.h before windows.h" unless the magic define is set - first. + In order to allow the high level language to add new features to the library + but still rely in its error reporting features the new function + libssh2_session_set_last_error (that just exposses _libssh2_error_flags) is + introduced. - Reported by: Vincent Torri - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-10/0064.shtml + *) For instance, connecting to a remote SSH service giving the hostname and + port. + + Signed-off-by: Salvador Fandino + Signed-off-by: Salvador Fandiño -Henrik Nordstrom (31 Oct 2011) -- [Vincent Torri brought this change] +- [Salvador Fandino brought this change] - Correct Windows include file name case, simplifying cross-compilation + _libssh2_error: Support allocating the error message - When cross compiling to Windows, libssh2.h include Windows header files - with upper case filenames : BaseTsd.h and WinSock2.h. + Before this patch "_libssh2_error" required the error message to be a + static string. - These files have lowercase names with mingw-w64 (iirc, it's the same with - mingw). And as on Windows, being lowercase or uppercase does not matter. 
+ This patch adds a new function "_libssh2_error_flags" accepting an + additional "flags" argument and specifically the flag + "LIBSSH2_ERR_FLAG_DUP" indicating that the passed string must be + duplicated into the heap. + + Then, the method "_libssh2_error" has been rewritten to use that new + function under the hood. + + Signed-off-by: Salvador Fandino + Signed-off-by: Salvador Fandiño -Daniel Stenberg (25 Oct 2011) -- [Jernej Kovacic brought this change] +- [Will Cosgrove brought this change] - libssh2_session_supported_algs: added + added engine.h include to fix warning -- [Kamil Dudka brought this change] +- [sune brought this change] - example/sftp_RW_nonblock: do not ignore LIBSSH2_ERROR_EAGAIN + kex.c: removed dupe entry from libssh2_kex_methods[] - Bug: https://bugzilla.redhat.com/745420 + Closes #51 -Peter Stuge (5 Oct 2011) -- example/ssh2_agent: Print host key fingerprint before authentication - - Also moves the comment about not being authenticated to before the - agent authentication takes place, so that it better matches the code. +- [Salvador Fandiño brought this change] -Daniel Stenberg (29 Sep 2011) -- OpenSSL EVP: fix threaded use of structs + userauth: Fix off by one error when reading public key file - Make sure we don't clear or reset static structs after first init so - that they work fine even when used from multiple threads. Init the - structs in the global init. + After reading the public key from file the size was incorrectly + decremented by one. - Help and assistance by: John Engstrom + This was usually a harmless error as the last character on the public + key file is an unimportant EOL. But if due to some error the public key + file is empty, the public key size becomes (uint)(0 - 1), resulting in + an unrecoverable out of memory error later. 
- Fixes #229 (again) + Signed-off-by: Salvador Fandi??o -- openssl: don't init static structs differently +- [Salvador Fandino brought this change] + + channel: Detect bad usage of libssh2_channel_process_startup - make_ctr_evp() is changed to take a struct pointer, and then each - _libssh2_EVP_aes_[keylen]_ctr function is made to pass in their own - static struct + A common novice programmer error (at least among those using the + wrapping Perl module Net::SSH2), is to try to reuse channels. - Reported by: John Engstrom - Fixes #229 + This patchs detects that incorrect usage and fails with a + LIBSSH2_ERROR_BAD_USE error instead of hanging. + + Signed-off-by: Salvador Fandino -Guenter Knauf (27 Sep 2011) -- Removed obsolete include path. +- [Will Cosgrove brought this change] -Daniel Stenberg (21 Sep 2011) -- read_state: clear the state variable better + kex: Added diffie-hellman-group-exchange-sha256 support - Set read_state back to idle before trying to send anything so that if - the state somehow is wrongly set. + ... and fixed HMAC_Init depricated usage - Also, avoid such a case of confusion by resetting the read_state when an - sftp handle is closed. + Closes #48 -- sftp_read: remove leftover fprintf +Alexander Lamaison (21 Sep 2015) +- Prefixed new #defines to prevent collisions. - Reported by: Alexander Lamaison + Other libraries might have their own USE_WIN32_*FILES. -- sftp.h: fix the #ifdef to prevent multiple inclusions +- [keith-daigle brought this change] -- sftp_read: use a state variable to avoid bad writes - - When a channel_write call has gotten an EAGAIN back, we try harder to - continue the same write in the subsequent invoke. 
+ Update examples/scp.c to fix bug where large files on win32 would cause got to wrap and go negative -- window_size: explicit adjustments only +- [David Byron brought this change] + + add libssh2_scp_recv2 to support large (> 2GB) files on windows + +Daniel Stenberg (17 Sep 2015) +- [sune brought this change] + + WinCNG: support for SHA256/512 HMAC - Removed the automatic window_size adjustments from - _libssh2_channel_read() and instead all channel readers must now make - sure to enlarge the window sizes properly themselves. + Closes #47 + +- [brian m. carlson brought this change] + + Add support for HMAC-SHA-256 and HMAC-SHA-512. - libssh2_channel_read_ex() - the public function, now grows the window - size according to the requested buffer size. Applications can still opt - to grow the window more on demand. Larger windows tend to give higher - performance. + Implement support for these algorithms and wire them up to the libgcrypt + and OpenSSL backends. Increase the maximum MAC buffer size to 64 bytes + to prevent buffer overflows. Prefer HMAC-SHA-256 over HMAC-SHA-512, and + that over HMAC-SHA-1, as OpenSSH does. - sftp_read() now uses the read-ahead logic to figure out a window_size. 
+ Closes #40 -- libssh2.h: bump the default window size to 256K +- [Zenju brought this change] -- libssh2_userauth_keyboard_interactive.3: fix man warning + kex: free server host key before allocating it (again) - It seemed to occur due to the excessive line length + Fixes a memory leak when Synology server requests key exchange + + Closes #43 -- [Mikhail Gusarov brought this change] +- [Viktor Szakats brought this change] - Add missing .gitignore entries + GNUmakefile: up OpenSSL version + + closes #23 -- [Mikhail Gusarov brought this change] +- [Viktor Szakats brought this change] - Add manpage syntax checker to 'check' target + GNUmakefile: add -m64 CFLAGS when targeting mingw64, add -m32/-m64 to LDFLAGS - In virtually every libssh2 release Debian's lintian catches syntax errors in - manpages. Prevent it by checking manpages as a part of testsuite. + libssh2 equivalent of curl patch https://github.com/bagder/curl/commit/d21b66835f2af781a3c2a685abc92ef9f0cd86be + + This allows to build for the non-default target when using a multi-target mingw distro. + Also bump default OpenSSL dependency path to 1.0.2c. -- libssh2_banner_set.3: fix nroff syntax mistake +- [Viktor Szakats brought this change] -Guenter Knauf (10 Sep 2011) -- Use predefined resource compiler macro. - -- Added casts to silent compiler warnings. - -- Fixed uint64_t printf. - -- Fixed macro function signatures. - -- NetWare makefile tweaks. - -- Removed unused var. - -- Added 2 samples not mentioned. - -- Dont build x11 sample with MinGW. - -- Fixed executable file description. + GNUmakefile: add support for LIBSSH2_LDFLAG_EXTRAS + + It is similar to existing LIBSSH2_CFLAG_EXTRAS, but for + extra linker options. + + Also delete some line/file ending whitespace. + + closes #27 -- Removed unused var. +- [nasacj brought this change] -- Kill stupid gcc 3.x uninitialized warning. + hostkey.c: Fix compiling error when OPENSSL_NO_MD5 is defined + + Closes #32 -- Build all examples. 
+- [Mizunashi Mana brought this change] -- More MinGW makefile tweaks. + openssl.h: adjust the rsa/dsa includes - Renamed *.mingw makefiles to GNUmakefile since GNU make picks these - up automatically, and therefore win32/Makefile removed. - -- Removed forgotten WINSOCK_VERSION defines. + ... to work when built without DSA support. + + Closes #36 -Daniel Stenberg (9 Sep 2011) -- libssh2_session_startup(3) => libssh2_session_handshake(3) +Alexander Lamaison (26 Jul 2015) +- Let CMake build work as a subproject. - Propagate for the current function in docs and examples. - libssh2_session_startup() is deprecated. + Patch contributed by JasonHaslam. -- libssh2_banner_set => libssh2_session_banner_get +- Fix builds with Visual Studio 2015. - Marked the old function as deprecated. Added the new name in the correct - name space with the same arguments and functionality. + VS2015 moved stdio functions to the header files as inline function. That means check_function_exists can't detect them because it doesn't use header files - just does a link check. Instead we need to use check_symbol_exists with the correct headers. -- new function: libssh2_session_banner_get +Kamil Dudka (2 Jul 2015) +- cmake: include CMake files in the release tarballs - Returns the banner from the server handshake + Despite we announced the CMake support in libssh2-1.6.0 release notes, + the files required by the CMake build system were not included in the + release tarballs. Hence, the only way to use CMake for build was the + upstream git repository. - Fixes #226 - -- libssh2.h: bump version to 1.4.0 for new function(s) + This commit makes CMake actually supported in the release tarballs. -- remove embedded CVS/svn tags +- tests/mansyntax.sh: fix 'make distcheck' with recent autotools + + Do not create symbolic links off the build directory. Recent autotools + verify that out-of-source build works even if the source directory tree + is not writable. 
-- [liuzl brought this change] +- openssl: fix memleak in _libssh2_dsa_sha1_verify() - API add:libssh2_sftp_get_channel +Daniel Stenberg (12 Jun 2015) +- openssl: make libssh2_sha1 return error code - Return the channel of sftp, then caller can - control the channel's behavior. + - use the internal prefix _libssh2_ for non-exported functions - Signed-off-by: liuzl - -- _libssh2_channel_read: react on errors from receive_window_adjust + - removed libssh2_md5() since it wasn't used - Previously the function would ignore all errors except for EAGAIN. - -- sftp_read: extend and clarify the documentation + Reported-by: Kamil Dudka -- sftp_read: cap the read ahead maximum amount - - Now we only go up to LIBSSH2_CHANNEL_WINDOW_DEFAULT*30 bytes SFTP read - ahead, which currently equals 64K*30 == 1966080 bytes. +- [LarsNordin-LNdata brought this change] -- _libssh2_channel_read: fix non-blocking window adjusting + SFTP: Increase speed and datasize in SFTP read - If EAGAIN is returned when adjusting the receive window, we must not - read from the transport directly until we've finished the adjusting. + The function sftp_read never return more then 2000 bytes (as it should + when I asked Daniel). I increased the MAX_SFTP_READ_SIZE to 30000 but + didn't get the same speed as a sftp read in SecureSSH. I analyzed the + code and found that a return always was dona when a chunk has been read. + I changed it to a sliding buffer and worked on all available chunks. I + got an increase in speed and non of the test I have done has failed + (both local net and over Internet). Please review and test. I think + 30000 is still not the optimal MAX_SFTP_READ_SIZE, my next goal is to + make an API to enable changing this value (The SecureSSH sftp_read has + more complete filled packages when comparing the network traffic) -Guenter Knauf (8 Sep 2011) -- Fix for systems which need sys/select.h. +- bump: start working on 1.6.1 -- The files were not gone but renamed ... 
+Version 1.6.0 (5 Jun 2015) -Daniel Stenberg (6 Sep 2011) -- sftp_read: added documenting comment - - Taken from some recent email conversations I added some descriptions of - the logic in sftp_read() to aid readers. +Daniel Stenberg (5 Jun 2015) +- RELEASE-NOTES: synced with 858930cae5c6a -- 1.3.1: start the work +Marc Hoersken (19 May 2015) +- wincng.c: fixed indentation -Version 1.3.0 (6 Sep 2011) +- [sbredahl brought this change] -Daniel Stenberg (6 Sep 2011) -- Makefile.am: the Makefile.win32 files are gone + wincng.c: fixed memleak in (block) cipher destructor -- RELEASE-NOTES: updated for 1.3.0 +Alexander Lamaison (6 May 2015) +- [Jakob Egger brought this change] -- sftp_read: a short read is not end of file - - A returned READ packet that is short will now only reduce the - offset. + libssh2_channel_open: more detailed error message - This is a temporary fix as it is slightly better than the previous - approach but still not very good. + The error message returned by libssh2_channel_open in case of a server side channel open failure is now more detailed and includes the four standard error conditions in RFC 4254. -- [liuzl brought this change] +- [Hannes Domani brought this change] - _libssh2_packet_add: adjust window size when truncating + kex: fix libgcrypt memory leaks of bignum - When receiving more data than what the window size allows on a - particular channel, make sure that the window size is adjusted in that - case too. Previously it would only adjust the window in the non-error - case. - -Guenter Knauf (29 Aug 2011) -- Silent compiler warning with MinGW64. - -- Fixed link to native Win32 awk tool. + Fixes #168. -- Renamed MinGW makefiles. +Marc Hoersken (3 Apr 2015) +- configure.ac: check for SecureZeroMemory for clear memory feature -- Some MinGW makefile tweaks. +- Revert "wincng.c: fix clear memory feature compilation with mingw" - Enable build without GNU tools and with MinGW64 compiler. 
+ This reverts commit 2d2744efdd0497b72b3e1ff6e732aa4c0037fc43. + + Autobuilds show that this did not solve the issue. + And it seems like RtlFillMemory is defined to memset, + which would be optimized out by some compilers. -- Fixed aes_ctr_do_cipher() signature. +- wincng.c: fix clear memory feature compilation with mingw -Daniel Stenberg (26 Aug 2011) -- [liuzl brought this change] +Alexander Lamaison (1 Apr 2015) +- [LarsNordin-LNdata brought this change] - libssh2_sftp_seek64: flush packetlist and buffered data + Enable use of OpenSSL that doesn't have DSA. - When seeking to a new position, flush the packetlist and buffered data - to prevent already received or pending data to wrongly get used when - sftp-reading from the new offset within the file. + Added #if LIBSSH2_DSA for all DSA functions. -- sftp_read: advance offset correctly for buffered copies - - In the case where a read packet has been received from the server, but - the entire contents couldn't be copied to the user-buffer, the data is - instead buffered and copied to the user's buffer in the next invocation - of sftp_read(). When that "extra" copy is made, the 'offset' pointer was - not advanced accordingly. - - The biggest impact of this flaw was that the 'already' variable at the - top of the function that figures out how much data "ahead" that has - already been asked for would slowly go more and more out of sync, which - could lead to the file not being read all the way to the end. +- [LarsNordin-LNdata brought this change] + + Use correct no-blowfish #define with OpenSSL. - This problem was most noticable in cases where the application would - only try to read the exact file size amount, like curl does. In the - examples libssh2 provides the sftp read function is most often called - with a fixed size large buffer and then the bug would not appear as - easily. + The OpenSSL define is OPENSSL_NO_BF, not OPENSSL_NO_BLOWFISH. 
+ +Marc Hoersken (25 Mar 2015) +- configure: error if explicitly enabled clear-memory is not supported - This bug was introduced in the SFTP rewrite in 1.2.8. + This takes 22bd8d81d8fab956085e2079bf8c29872455ce59 and + b8289b625e291bbb785ed4add31f4759241067f3 into account, + but still makes it enabled by default if it is supported + and error out in case it is unsupported and was requested. + +Daniel Stenberg (25 Mar 2015) +- configure: make clear-memory default but only WARN if backend unsupported - Bug: http://curl.haxx.se/mail/lib-2011-08/0305.html - http://www.libssh2.org/mail/libssh2-devel-archive-2011-08/0085.shtml + ... instead of previous ERROR. -- wrap some long lines < 80 columns +Marc Hoersken (24 Mar 2015) +- wincng.h: fix warning about computed return value not being used -- LIBSSH2_RECV: fix typo, use the RECV_FD macro +- nonblocking examples: fix warning about unused tvdiff on Mac OS X -- subsystem_netconf.c: fix compiler warnings +Daniel Stenberg (24 Mar 2015) +- openssl: fix compiler warnings -- [Henrik Nordstrom brought this change] +- cofigure: fix --disable-clear-memory check - Custom callbacks for performing low level socket I/O +Marc Hoersken (23 Mar 2015) +- scp.c: improved command length calculation + + Reduced number of calls to strlen, because shell_quotearg already + returns the length of the resulting string (e.q. quoted path) + which we can add to the existing and known cmd_len. + Removed obsolete call to memset again, because we can put a final + NULL-byte at the end of the string using the calculated length. -- version bump: start working towards 1.3.0 +- scp.c: improved and streamlined formatting -Version 1.2.9 (16 Aug 2011) +- scp.c: fix that scp_recv may transmit not initialised memory -Daniel Stenberg (16 Aug 2011) -- RELEASE-NOTES: synced with 95d69d3a81261 +- scp.c: fix that scp_send may transmit not initialised memory + + Fixes ticket 244. Thanks Torsten. 
-- [Henrik Nordstrom brought this change] +- kex: do not ignore failure of libssh2_sha1_init() + + Based upon 43b730ce56f010e9d33573fcb020df49798c1ed8. + Fixes ticket 290. Thanks for the suggestion, mstrsn. - Document prototypes for macro defined functions +- wincng.h: fix return code of libssh2_md5_init() -- [Henrik Nordstrom brought this change] +- openssl.c: fix possible segfault in case EVP_DigestInit fails - Avoid reuse after free when closing X11 channels +- wincng.c: fix possible use of uninitialized variables -- _libssh2_channel_write: handle window_size == 0 better - - When about to send data on the channel and the window size is 0, we must - not just return 0 if the transport_read() function returned EAGAIN as it - then causes a busy-loop. - - Bug: http://libssh2.org/mail/libssh2-devel-archive-2011-08/0011.shtml +- wincng.c: fix unused argument warning if clear memory is not enabled -- gettimeofday: fix name space pollution +- wincng: Added explicit clear memory feature to WinCNG backend - For systems without its own gettimeofday() implementation, we still must - not provide one outside our namespace. + This re-introduces the original feature proposed during + the development of the WinCNG crypto backend. It still needs + to be added to libssh2 itself and probably other backends. - Reported by: Bill Segall + Memory is cleared using the function SecureZeroMemory which is + available on Windows systems, just like the WinCNG backend. 
-Dan Fandrich (5 Aug 2011) -- libssh2.pc.in: Fixed spelling in pkgconfig file +- wincng.c: fixed mixed line-endings -Peter Stuge (17 Jul 2011) -- example/subsystem_netconf.c: Add missing #include +- wincng.c: fixed use of invalid parameter types in a8d14c5dcf -- example/subsystem_netconf.c: Discard ]]>]]> and return only XML response +- wincng.c: only try to load keys corresponding to the algorithm -- example/subsystem_netconf.c: Fix uninitialized variable bug +- wincng.c: moved PEM headers into definitions -- example: Add subsystem_netconf.c - - This example demonstrates how to use libssh2 to send a request to - the NETCONF subsystem available e.g. in JunOS. - - See also http://tools.ietf.org/html/draft-ietf-netconf-ssh-06 +- wincng.h: fixed invalid parameter name -Daniel Stenberg (16 Jul 2011) -- man page cleanups: non-existing functions need no man pages +- wincng: fixed mismatch with declarations in crypto.h -- libssh2_new_host_entry.3: removed - - This is just junk leftovers. +- userauth.c: fixed warning C6001: using uninitialized sig and sig_len -- userauth_keyboard_interactive: fix buffer overflow +- pem.c: fixed warning C6269: possible incorrect order of operations + +- wincng: add support for authentication keys to be passed in memory - Partly reverse 566894494b4972ae12 which was simplifying the code far too - much and ended up overflowing a buffer within the LIBSSH2_SESSION - struct. Back to allocating the buffer properly like it used to do. + Based upon 18cfec8336e and daa2dfa2db. + +- pem.c: add _libssh2_pem_parse_memory to parse PEM from memory - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-06/0032.shtml - Reported by: Alfred Gebert + Requirement to implement 18cfec8336e for Libgcrypt and WinCNG. 
-- keyboard-interactive man page: cleaned up +- pem.c: fix copy and paste mistake from 55d030089b8 -- [Alfred Gebert brought this change] +- userauth.c: fix another possible dereference of a null pointer - _libssh2_recv(): handle ENOENT error as EAGAIN - - A sftp session failed with error "failure establishing ssh session" on - Solaris and HP-UX. Sometimes the first recv() function call sets errno - to ENOENT. In the man pages for recv of Solaris and HP-UX the error - ENOENT is not documented. - - I tested Solaris SPARC and x86, HP-UX i64, AIX, Windows and Linux. +- userauth.c: fix possible dereference of a null pointer -- agent_list_identities: fix out of scope access - - An auto variable out of scope was being referenced and used. - - fixes #220 +- pem.c: reduce number of calls to strlen in readline -- _libssh2_wait_socket: fix timeouts for poll() uses +Alexander Lamaison (17 Mar 2015) +- [Will Cosgrove brought this change] -- windows: inclusion fix + Initialise HMAC_CTX in more places. - include winsock2.h for all windows compilers + Missed a couple more places we init ctx to avoid openssl threading crash. -- keyb-interactive: add the fixed buffer +- Build build breakage in WinCNG backend caused when adding libssh2_userauth_publickey_frommemory. - Belongs to commit 5668944 - -- code cleanup: don't use C99/c++ comments + The new feature isn't implemented for the WinCNG backend currently, but the WinCNG backend didn't contain any implementation of the required backend functions - even ones that returns an error. That caused link errors. - We aim for C89 compliance + This change fixes the problem by providing an implementation of the backend functions that returns an error. -- keyb-interactive: allow zero length fields - - Allow zero length fields so they don't cause malloc(0) calls - - Avoid free()ing NULL pointers +- Fix breakage in WinCNG backend caused by introducing libssh2_hmac_ctx_init. - Avoid a malloc of a fixed 5 byte buffer. 
+ The macro was defined to nothing for the libgcrypt backend, but not for WinCNG. This brings the latter into line with the former. -- libssh2_channel_process_startup.3: clean up +Daniel Stenberg (15 Mar 2015) +- userauth_publickey_frommemory.3: add AVAILABILITY - Remove the references to the macro-fied shortcuts as they have their own - individual man pages. + ... it will be added in 1.6.0 + +- libssh2: next version will be called 1.6.0 - Made the prototype different and more readable. + ... since we just added a new function. -- man page: fix .BR lines +- docs: add libssh2_userauth_publickey_frommemory.3 to dist - We don't use \fI etc on .BR lines + The function and man page were added in commit 18cfec8336e -- userauth_keyboard_interactive: skip code on zero length auth +- [Jakob Egger brought this change] -- libssh2_channel_forward_accept.3: mention how to get error + direct_tcpip: Fixed channel write - Since this returns a pointer, libssh2_session_last_errno() must be used - to get the actual error code and it wasn't that clear before. + There were 3 bugs in this loop: + 1) Started from beginning after partial writes + 2) Aborted when 0 bytes were sent + 3) Ignored LIBSSH2_ERROR_EAGAIN + + See also: + https://trac.libssh2.org/ticket/281 + https://trac.libssh2.org/ticket/293 -- timeout docs: mention they're added in 1.2.9 +Alexander Lamaison (15 Mar 2015) +- [Will Cosgrove brought this change] -- sftp_write_sliding.c: indent fix + Must init HMAC_CTX before using it. - Use the standard indenting and removed CVS leftover comment - -- [zl liu brought this change] + Must init ctx before using it or openssl will reuse the hmac which is not thread safe and causes a crash. + Added libssh2_hmac_ctx_init macro. - sftp_write_sliding: send the complete file +- Add continuous integration configurations. - When reaching the end of file there can still be data left not sent. + Linux-based CI is done by Travis CI. Windows-based CI is done by Appveyor. 
-- [Douglas Masterson brought this change] +- [David Calavera brought this change] - session_startup: init state properly + Allow authentication keys to be passed in memory. - libssh2_session_startup() didn't set the state correctly so it could get - confused. + All credits go to Joe Turpin, I'm just reaplying and cleaning his patch: + http://www.libssh2.org/mail/libssh2-devel-archive-2012-01/0015.shtml - Fixes #218 - -- timeout: added man pages + * Use an unimplemented error for extracting keys from memory with libgcrypt. -- BLOCK_ADJUST_ERRNO: move rc to right level - - We can't declare the variable within the block and use it in the final - do-while() expression to be properly portable C89. +Daniel Stenberg (14 Mar 2015) +- docs: include the renamed INSTALL* files in dist -- [Matt Lilley brought this change] +Alexander Lamaison (13 Mar 2015) +- Prevent collisions between CMake and Autotools in examples/ and tests/. - adds a timeout to blocking calls +- Avoid clash between CMake build and Autotools. - Fixes bug #160 as per Daniel's suggestion + Autotools expects a configuration template file at src/libssh2_config.h.in, which buildconf generates. But the CMake build system has its CMake-specific version of the file at this path. This means that, if you don't run buildconf, the Autotools build will fail because it configured the wrong header template. - Adds libssh2_session_set_timeout() and libssh2_session_get_timeout() + See https://github.com/libssh2/libssh2/pull/8. -- SCP: fix incorrect error code +- Merge pull request #8 from alamaison/cmake - After an error occurs in libssh2_scp_recv() or libssh2_scp_send(), the - function libssh2_session_last_error() would return - LIBSSH2_ERROR_SOCKET_NONE on error. + CMake build system. + +- CMake build system. 
- Bug: http://trac.libssh2.org/ticket/216 - Patch by: "littlesavage" + Tested: + - Windows: + - Visual C++ 2005/2008/2010/2012/2013/MinGW-w64 + - static/shared + - 32/64-bit + - OpenSSL/WinCNG + - Without zlib + - Linux: + - GCC 4.6.3/Clang 3.4 + - static/shared + - 32/64-bit + - OpenSSL/Libgcrypt + - With/Without zlib + - MacOS X + - AppleClang 6.0.0 + - static + - 64-bit + - OpenSSL + - Without zlib - Fixes #216 + Conflicts: + README -Guenter Knauf (19 Apr 2011) -- Updated default (recommended) dependency versions. +- Man man syntax tests fail gracefully if man version is not suitable. -Daniel Stenberg (17 Apr 2011) -- libssh2_session_block_directions: fix mistake - - The last LIBSSH2_SESSION_BLOCK_INBOUND should be - LIBSSH2_SESSION_BLOCK_OUTBOUND - - And I shortened the short description +- Return valid code from test fixture on failure. - Reported by: "drswinghead" + The sshd test fixture was returning -1 if an error occurred, but negative error codes aren't technically valid (google it). Bash on Windows converted them to 0 which made setup failure look as though all tests were passing. -- msvcproj: added libs and debug stuff - - Added libraries needed to link whether using openssl dynamically or - statically +- Let mansyntax.sh work regardless of where it is called from. + +Daniel Stenberg (12 Mar 2015) +- [Viktor Szakáts brought this change] + + mingw build: allow to pass custom CFLAGS - Added LIBSSH2DEBUG define to debug versions to enable tracing + Allow to pass custom `CFLAGS` options via environment variable + `LIBSSH2_CFLAG_EXTRAS`. Default and automatically added options of + `GNUmakefile` have preference over custom ones. This addition is useful + for passing f.e. custom CPU tuning or LTO optimization (`-flto + -ffat-lto-objects`) options. The only current way to do this is to edit + `GNUmakefile`. This patch makes it unnecessary. 
- URL: http://trac.libssh2.org/ticket/215 - Patch by: Mark Smith + This is a mirror of similar libcurl patch: + https://github.com/bagder/curl/pull/136 -- sftp_write: clean offsets on error +- [Will Cosgrove brought this change] + + userauth: Fixed prompt text no longer being copied to the prompts struct - When an error has occurred on FXP_WRITE, we must make sure that the - offset, sent offset and acked counter are reset properly. + Regression from 031566f9c -- example/.gitignore: ignore built binaries +- README: update the git repo locations -- sftp_write: flush the packetlist on error +- wait_socket: wrong use of difftime() - When an error occurs during write, flush the entire list of pending - outgoing SFTP packets. - -- keepalive: add first basic man pages + With reversed arguments it would always return a negative value... - Someone on IRC pointed out that we don't have these documented so I - wrote up a first set based on the information in the wiki: - http://trac.libssh2.org/wiki/KeepAlive + Bug: https://github.com/bagder/libssh2/issues/1 -- scp_write_nonblock.c: remove pointless check - - libssh2_channel_write() cannot return a value that is larger than the - input length value +- bump: start working toward 1.5.1 now -Mikhail Gusarov (9 Apr 2011) -- s/\.NF/.nf/ to fix wrong macro name caught by man --warnings +Version 1.5.0 (11 Mar 2015) -Daniel Stenberg (6 Apr 2011) -- version: bump to 1.2.9_dev - - Also update the copyright year range to include 2011 +Daniel Stenberg (11 Mar 2015) +- RELEASE-NOTES: 1.5.0 release -- configure: fix $VERSION +- [Mariusz Ziulek brought this change] + + kex: bail out on rubbish in the incoming packet - Stop using the $VERSION variable as it seems to be magically used by - autoconfig itself and thus gets set to the value set in AC_INIT() - without us wanting that. $LIBSSH2VER is now the libssh2 version as - detected. 
+ CVE-2015-1782 - Reported by: Paul Howarth - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-04/0008.shtml - -- maketgz: use git2news.pl by the correct name + Bug: http://www.libssh2.org/adv_20150311.html -Version 1.2.8 (4 Apr 2011) +- docs: move INSTALL, AUTHORS, HACKING and TODO to docs/ + + And with this, cleanup README to be shorter and mention the new source + code home. -Daniel Stenberg (4 Apr 2011) -- RELEASE-NOTES: synced with fabf1a45ee +- .gitignore: don't ignore INSTALL -- NEWS: auto-generated from git - - Starting now, the NEWS file is generated from git using the git2news.pl - script. This makes it always accurate and up-to-date, even for daily - snapshots etc. +Dan Fandrich (4 Mar 2015) +- examples/x11.c: include sys/select.h for improved portability -- sftp_write: handle FXP_WRITE errors +Daniel Stenberg (4 Mar 2015) +- RELEASE-NOTES: synced with a8473c819bc068 - When an sftp server returns an error back on write, make sure the - function bails out and returns the proper error. + In preparation for the upcoming 1.5.0 release. -- configure: stop using the deprecated AM_INIT_AUTOMAKE syntax +Guenter Knauf (8 Jan 2015) +- NetWare build: added some missing exports. -Alexander Lamaison (13 Mar 2011) -- Support unlimited number of host names in a single line of the known_hosts file. - - Previously the code assumed either a single host name or a hostname,ip-address pair. However, according to the spec [1], there can be any number of comma separated host names or IP addresses. +Marc Hoersken (29 Dec 2014) +- knownhost.c: fix use of uninitialized argument variable wrote - [1] http://www.openbsd.org/cgi-bin/man.cgi?query=sshd&sektion=8 + Detected by clang scan in line 1195, column 18. -Daniel Stenberg (26 Feb 2011) -- libssh2_knownhost_readfile.3: clarify return value +- examples/x11.c: fix result of operation is garbage or undefined - This function returns the number of parsed hosts on success, not just - zero as previously documented. 
+ Fix use of uninitialized structure w_size_bck. + Detected by clang scan in line 386, column 28. -Peter Stuge (26 Feb 2011) -- Don't save allocated packet size until it has actually been allocated +- examples/x11.c: remove dead assigments of some return values - The allocated packet size is internal state which needs to match reality - in order to avoid problems. This commit fixes #211. + Detected by clang scan in line 212, column 9. + Detected by clang scan in line 222, column 13. + Detected by clang scan in line 410, column 13. -Daniel Stenberg (21 Feb 2011) -- [Alfred Gebert brought this change] +- examples/x11.c: fix possible memory leak if read fails + + Detected by clang scan in line 224, column 21. - session_startup: manage server data before server identification +- examples/x11.c: fix invalid removal of first list element - Fix the bug that libssh2 could not connect if the sftp server - sends data before sending the version string. + Fix use of memory after it was being freed. + Detected by clang scan in line 56, column 12. + +- userauth.c: make sure that sp_len is positive and avoid overflows - http://tools.ietf.org/html/rfc4253#section-4.2 + ... if the pointer subtraction of sp1 - pubkey - 1 resulted in a + negative or larger value than pubkey_len, memchr would fail. - "The server MAY send other lines of data before sending the version - string. Each line SHOULD be terminated by a Carriage Return and Line - Feed. Such lines MUST NOT begin with "SSH-", and SHOULD be encoded - in ISO-10646 UTF-8 [RFC3629] (language is not specified). Clients - MUST be able to process such lines." - -- [Alfred Gebert brought this change] + Reported by Coverity CID 89846. - fullpacket: decompression only after init +- channel.c: remove logically dead code, host cannot be NULL here - The buffer for the decompression (remote.comp_abstract) is initialised - in time when it is needed. 
With this fix decompression is disabled when - the buffer (remote.comp_abstract) is not initialised. + ... host cannot be NULL in line 525, because it is always + valid (e.g. at least set to "0.0.0.0") after lines 430 and 431. - Bug: http://trac.libssh2.org/ticket/200 + Reported by Coverity CID 89807. -- _libssh2_channel_read: store last error +- session.c: check return value of session_nonblock during startup - When the transport layer returns EAGAIN this function didn't call - _libssh2_error() which made the last_error not get set. - -- sftp_write: clarified the comment header + Reported by Coverity CID 89803. -- sftp_read: avoid wrapping counter to insanity - - As pointed out in bug #206, if a second invoke of libssh2_sftp_read() - would shrink the buffer size, libssh2 would go nuts and send out read - requests like crazy. This was due to an unsigned variable turning - "negative" by some wrong math, and that value would be the amount of - data attempt to pre-buffer! +- session.c: check return value of session_nonblock in debug mode - Bug: http://trac.libssh2.org/ticket/206 + Reported by Coverity CID 89805. -- sftp_packet_read: use 32bit variables for 32bit data +- pem.c: fix mixed line-endings introduced with 8670f5da24 -- libssh2_sftp_stat_ex.3: cleaned up, extended - - Removed the macros from it as they have their own man pages. +- pem.c: make sure there's a trailing zero and b64data is not NULL - Added the LIBSSH2_SFTP_ATTRIBUTES struct in here for easier reference. + ... if there is no base64 data between PEM header and footer. + Reported by Coverity CID 89823. -- sftp_readdir: return error if buffer is too small +- kex.c: make sure mlist is not set to NULL - If asked to read data into a buffer and the buffer is too small to hold - the data, this function now returns an error instead of as previously - just copy as much as fits. + ... if the currently unsupported LANG methods are called. + Reported by Coverity CID 89834. 
-- sftp_symlink: return error if receive buffer too small - - and clean up some variable type mismatches +- packet.c: i < 256 was always true and i would overflow to 0 - Discussion: http://www.libssh2.org/mail/libssh2-devel-archive-2011-01/0001.shtml + Visualize that the 0-termination is intentional, because the array + is later passed to strlen within _libssh2_packet_askv. -- docs: clarify what happens with a too small buffer - - This flaw is subject to change, but I figured it might be valuable to - users of existing code to know how it works. +- silence multiple data conversion warnings -- channel_request_pty_size: fix reqPTY_state - - The state variable isn't properly set so every other call to the - function fails! +Daniel Stenberg (23 Dec 2014) +- agent_connect_unix: make sure there's a trailing zero - Bug: http://libssh2.org/mail/libssh2-devel-archive-2010-12/0096.shtml - Reported by: Steve Legg + ... if the path name was too long. Reported by Coverity CID 89801. -- data size: cleanup +Marc Hoersken (22 Dec 2014) +- examples on Windows: use native SOCKET-type instead of int - Fix 64bit warnings by using (s)size_t and dedicated uint32_t types more. + And check return values accordingly. -- [Pierre Joye brought this change] +- userauth.c: improve readability and clarity of for-loops - ssize_t: proper typedef with MSVC compilers +Daniel Stenberg (22 Dec 2014) +- calloc: introduce LIBSSH2_CALLOC() - As discussed on the mailing list, it was wrong for win64 and using the - VC-provided type is the safest approach instead of second- guessing - which one it should be. + A simple function using LIBSSH2_ALLOC + memset, since this pattern was + used in multiple places and this simplies code in general. -Guenter Knauf (22 Dec 2010) -- Updated OpenSSL version. +Marc Hoersken (15 Dec 2014) +- libssh2_priv.h: Ignore session, context and format parameters -- Expanded tabs to spaces. 
+- x11 example: check return value of socket function -Peter Stuge (21 Dec 2010) -- [Joey Degges brought this change] +- examples: fixed mixed line-endings introduced with aedfba25b8 - _libssh2_ntohu64: fix conversion from network bytes to uint64 +- wincng.c: explicitly ignore BCrypt*AlgorithmProvider return codes - Cast individual bytes to uint64 to avoid overflow in arithmetic. + Fixes VS2012 code analysis warning C6031: + return value ignored: could return unexpected value -Daniel Stenberg (20 Dec 2010) -- libssh2_userauth_list: language fix +- wincng.c: fix possible invalid memory write access - "faily" is not a good English word, and I also cleaned up some other minor - mistakes + Fixes VS2012 code analysis warning C6386: + buffer overrun: accessing 'pbOutput', the writable size is + 'cbOutput' bytes, but '3' bytes may be written: libssh2 wincng.c 610 -- crypto: unify the generic functions +- tests on Windows: check for WSAStartup return code - Added crypto.h that is the unified header to include when using crypto - functionality. It should be the only header that needs to adapt to the - underlying crypto library in use. It provides the set of prototypes that - are library agnostic. - -- [Mark Smith brought this change] + Fixes VS2012 code analysis warning C6031: + return value ignored: could return unexpected value - userauth: derive publickey from private - - Pass a NULL pointer for the publickey parameter of - libssh2_userauth_publickey_fromfile and - libssh2_userauth_hostbased_fromfile functions. In this case, the - functions recompute the public key from the private key file data. - - This is work done by Jean-Louis CHARTON - , then adapted by Mark Smith and - slightly edited further by me Daniel. +- wincng.c: fix possible NULL pointer de-reference of bignum - WARNING: this does leave the feature NOT WORKING when libssh2 is built - to use libgcrypt instead of OpenSSL simply due to lack of - implementation. 
- -- ssh2_echo: Value stored to 'exitcode' is never read + Fixes VS2012 code analysis warning C6011: + dereferencing NULL pointer 'bignum'. libssh2 wincng.c 1567 -- _libssh2_packet_add: fix SSH_MSG_DEBUG weirdness +- wincng.c: fix possible use of uninitialized memory - I believe I may have caused this weird typo style error when I cleaned - up this function a while ago. Corrected now. + Fixes VS2012 code analysis warning C6001: + using uninitialized memory 'cbDecoded'. libssh2 wincng.c 553 -- uint32: more longs converted to proper types +- packet.c: fix possible NULL pointer de-reference within listen_state - I also moved the MAC struct over to the mac.h header file and made sure - that the users of that struct include that file. + Fixes VS2012 code analysis warning C6011: + dereferencing NULL pointer 'listen_state->channel'. libssh2 packet.c 221 -- SFTP: more types to uint32_t +- kex.c: fix possible NULL pointer de-reference with session->kex - The 'num_names' field in the SSH_FXP_NAME response is an unsigned 32bit - value so we make sure to treat it like that. + Fixes VS2012 code analysis warning C6011: + dereferencing NULL pointer 'session->kex'. libssh2 kex.c 1761 -- SFTP: request_ids are uint32_t +- agent.c: check return code of MapViewOfFile - I went over the code and made sure we use uint32_t all over for the - request_id data. It is an unsigned 32bit value on the wire. - -- SFTP: store request_id separately in packets + Fixes VS2012 code analysis warning C6387: 'p+4' may be '0': + this does not adhere to the specification for the function + 'memcpy': libssh2 agent.c 330 - By using a new separate struct for incoming SFTP packets and not sharing - the generic packet struct, we can get rid of an unused field and add a - new one dedicated for holding the request_id for the incoming - package. As sftp_packet_ask() is called fairly often, a "mere" integer - comparison is MUCH faster than the previous memcmp() of (typically) 5 - bytes. 
+ Fixes VS2012 code analysis warning C6387: 'p' may be '0': + this does not adhere to the specification for the function + 'UnmapViewOfFile': libssh2 agent.c 333 -- libssh2_sftp_open_ex: man page extended and cleaned up +- examples on Windows: check for socket return code - I added the missing documentation for the 'flags' argument. + Fixes VS2012 code analysis warning C28193: + The variable holds a value that must be examined -- SFTP: unify the READ/WRITE chunk structs +- examples on Windows: check for WSAStartup return code + + Fixes VS2012 code analysis warning C6031: + return value ignored: could return unexpected value + +Guenter Knauf (11 Dec 2014) +- wincng.c: silent some more gcc compiler warnings. + +- wincng.c: silent gcc compiler warnings. + +- Watcom build: added support for WinCNG build. + +- build: updated dependencies in makefiles. + +Daniel Stenberg (4 Dec 2014) +- configure: change LIBS not LDFLAGS when checking for libs + + Closes #289 + + Patch-by: maurerpe + +Guenter Knauf (3 Dec 2014) +- MinGW build: some more GNUMakefile tweaks. + + test/GNUmakefile: added architecture autodetection; added switches to + CFLAGS and RCFLAGS to make sure that the right architecture is used. + Added support to build with WinCNG. + +- sftpdir.c: added authentication method detection. + + Stuff copied over from ssh2.c to make testing a bit easier. + +- NMake build: fixed LIBS settings. + +- NMake build: added support for WinCNG build. + +- MinGW build: some GNUMakefile tweaks. + + Added architecture autodetection; added switches to CFLAGS and + RCFLAGS to make sure that the right architecture is used. + Added support to build with WinCNG. + +- MinGW build: Fixed redefine warnings. + +- Updated copyright year. 
+ +Daniel Stenberg (31 Aug 2014) +- COPYING: bump the copyright year + +Dan Fandrich (28 Jul 2014) +- docs: fixed a bunch of typos + +- docs: added missing libssh2_session_handshake.3 file + +Marc Hoersken (19 May 2014) +- wincng.c: specify the required libraries for dependencies using MSVC + + Initially reported by Bob Kast as "for MS VS builds, specify the + libraries that are required so they don't need to go into all + project files that may use this library". Thanks a lot. + +- [Bob Kast brought this change] + + windows build: do not export externals from static library + + If you are building a DLL, then you need to explicitly export each + entry point. When building a static library, you should not. + + libssh2 was exporting the entry points whether it was building a DLL or a + static library. To elaborate further, if libssh2 was used as a static + library, which was being linked into a DLL, the libssh2 API would be + exported from that separate DLL. + +Daniel Stenberg (19 May 2014) +- [Mikhail Gusarov brought this change] + + Fix typos in manpages + +Marc Hoersken (18 May 2014) +- wincng.c: Fixed memory leak in case of an error during ASN.1 decoding + +- configure: Display individual crypto backends on separate lines + + This avoids line-wrapping in between parameters and makes the + error message look like the following: + + configure: error: No crypto library found! + Try --with-libssl-prefix=PATH + or --with-libgcrypt-prefix=PATH + or --with-wincng on Windows + +- [Bob Kast brought this change] + + libssh2_priv.h: a 1 bit bit-field should be unsigned + + some compilers may not like this + +- knownhost.c: Fixed warning that pointer targets differ in signedness + +- wincng.c: Fixed warning about pointer targets differing in signedness + +- tcpip-forward.c: Fixed warning that pointer targets differ in signedness + + libssh2_channel_forward_listen_ex uses ints instead of unsigned ints. 
+ +- misc.c: Fixed warning about mixed declarations and code + +- libgcrypt.h: Fixed warning about pointer targets differing in signedness + +- wincng.h: Fixed warning about pointer targets differing in signedness + +- misc.c: Fixed warning about unused parameter abstract + +- tcpip-forward.c: Removed unused variables shost, sport and sockopt + +- wincng.h: Added forward declarations for all WinCNG functions + + Initially reported by Bob Kast as "Wincng - define function + prototypes for wincng routines". Thanks a lot. + + Also replaced structure definitions with type definitions. + +- [Bob Kast brought this change] + + libssh2.h: on Windows, a socket is of type SOCKET, not int + +- win32: Added WinCNG targets to generated Visual Studio project + + Inspired by Bob Kast's reports, this commit enables the compilation + of libssh2 with WinCNG using the generated Visual Studio project files. + This commit adds WinCNG support to parts of the existing Win32 build + infrastructure, until new build systems, like pre-defined VS project + files or CMake files may be added. + + This commit and b20bfeb3e519119a48509a1099c06d65aa7da1d7 raise one + question: How to handle build systems, like VS project files, that + need to include all source files regardless of the desired target, + including all supported crypto backends? For now the mentioned commit + added a check for LIBSSH2_OPENSSL to openssl.c and with this commit + the supported crypto backends are hardcoded within Makefile.am. + +- libssh2_priv msvc: Removed redundant definition of inline keyword + + Initially reported by Bob Kast as "Remove redundant 'inline' define". + Thanks a lot. + +- wincng: Made data parameter to hash update function constant + + Initially reported by Bob Kast as "formal parameter must be const + since it is used in contexts where the actual parameter may be const". + Thanks a lot. 
+ +- wincng: fix cross-compilation against the w64 mingw-runtime package + +- openssl: Check for LIBSSH2_OPENSSL in order to compile with openssl + +- wincng: Fixed use of possible uninitialized variable pPaddingInfo + + Reported by Bob Kast, thanks a lot. + +- wincng: Added cast for double to unsigned long conversion + +- wincng: Cleaned up includes and check NTSTATUS using macro + + Removed header file combination that is not supported on a real + Windows platform and can only be compiled using MinGW. Replaced + custom NTSTATUS return code checks with BCRYPT_SUCCESS macro. + +Daniel Stenberg (16 Mar 2014) +- userauth_hostbased_fromfile: zero assign to avoid uninitialized use + + Detected by clang-analyze + +- channel_receive_window_adjust: store windows size always + + Avoid it sometimes returning without storing it, leaving calling + functions with unknown content! + + Detected by clang-analyzer + +- publickey_packet_receive: avoid junk in returned pointers + + clang-analyzer found this risk it would return a non-initialized pointer + in a success case + +Peter Stuge (16 Mar 2014) +- [Marc Hoersken brought this change] + + Added Windows Cryptography API: Next Generation based backend + +- [Marc Hoersken brought this change] + + knownhost.c: fixed that 'key_type_len' may be used uninitialized + + ../src/knownhost.c: In function 'libssh2_knownhost_readline': + ../src/knownhost.c:651:16: warning: 'key_type_len' may be used + uninitialized in this function [-Wmaybe-uninitialized] + rc = knownhost_add(hosts, hostbuf, NULL, + ^ + ../src/knownhost.c:745:12: note: 'key_type_len' was declared here + size_t key_type_len; + ^ + +- [Marc Hoersken brought this change] + + pem.c: always compile pem.c independently of crypto backend + +- Fix non-autotools builds: Always define the LIBSSH2_OPENSSL CPP macro + + Commit d512b25f69a1b6778881f6b4b5ff9cfc6023be42 introduced a crypto + library abstraction in the autotools build system, to allow us to more + easily support new crypto 
libraries. In that process it was found that + all other build system which we support are hard-coded to build with + OpenSSL. Commit f5c1a0d98bd51aeb24aca3d49c7c81dcf8bd858d fixes automake + introduced into non-autotools build systems but still overlooked the + CPP macro saying that we are using OpenSSL. + + Thanks to Marc Hörsken for identifying this issue and proposing a fix + for win32/{GNUmakefile,config.mk}. This commit uses a slightly different + approach but the end result is the same. + +Dan Fandrich (15 Mar 2014) +- channel_close: Close the channel even in the case of errors + +- sftp_close_handle: ensure the handle is always closed + + Errors are reported on return, but otherwise the close path is + completed as much as possible and the handle is freed on exit. + +Alexander Lamaison (6 Mar 2014) +- knownhost: Restore behaviour of `libssh2_knownhost_writeline` with short buffer. + + Commit 85c6627c changed the behaviour of `libssh2_knownhost_writeline` so that it stopped returning the number of bytes needed when the given buffer was too small. Also, the function changed such that is might write to part of the buffer before realising it is too small. + + This commit restores the original behaviour, whilst keeping the unknown-key-type functionality that 85c6627c. Instead of writing to the buffer piecemeal, the length of the various parts is calculated up front and the buffer written only if there is enough space. The calculated necessary size is output in `outlen` regardless of whether the buffer was written to. + + The main use-case for the original behaviour that this commit restores is to allow passing in a NULL buffer to get the actual buffer size needed, before calling the function again with the buffer allocated to the exact size required. + +- knownhost: Fix DSS keys being detected as unknown. + + I missing `else` meant ssh-dss format keys were being re-detected as unknown format. 
+
+Dan Fandrich (6 Mar 2014)
+- knownhosts: Abort if the hosts buffer is too small
+
+ This could otherwise cause a match on the wrong host
+
+- agent_list_identities: Fixed memory leak on OOM
+
+- Fixed a few typos
+
+- userauth: Fixed an attempt to free from stack on error
+
+- Fixed a few memory leaks in error paths
+
+- Fixed two potential use-after-frees of the payload buffer
+
+ The first might occur if _libssh2_packet_add returns an error, as
+ fullpacket_state wasn't reset to idle so if it were possible for
+ fullpacket to be called again, it would return to the same state
+ handler and re-use the freed p->packet buffer.
+
+ The second could occur if decrypt returned an error, as it freed the
+ packet buffer but did not clear total_num, meaning that freed buffer
+ could be written into again later.
+
+Alexander Lamaison (28 Nov 2013)
+- Fix missing `_libssh2_error` in `_libssh2_channel_write`.
+
+ In one case, the error code from `_libssh2_transport_read` was being returned from `_libssh2_channel_write` without setting it as the last error by calling `_libssh2_error`. This commit fixes that.
+
+ Found when using a session whose socket had been inadvertently destroyed. The calling code got confused because via `libssh2_session_last_error` it appeared no error had occurred, despite one being returned from the previous function.
+
+Kamil Dudka (21 Nov 2013)
+- [Mark McPherson brought this change]
+
+ openssl: initialise the digest context before calling EVP_DigestInit()
+
+ When using the OpenSSL libraries in FIPS mode, the function call
+ EVP_DigestInit() is actually #defined to FIPS_digestinit().
+ Unfortunately whereas EVP_DigestInit() initialises the context and then
+ calls EVP_DigestInit_ex(), this function assumes that the context has
+ been pre-initialised and crashes when it isn't.
+ + Bug: https://trac.libssh2.org/ticket/279 + + Fixes #279 + +- [Marc Hörsken brought this change] + + .gitignore: Ignore files like src/libssh2_config.h.in~ + +Peter Stuge (13 Nov 2013) +- Move automake conditionals added by commit d512b25f out of Makefile.inc + + Commit d512b25f69a1b6778881f6b4b5ff9cfc6023be42 added automake + conditionals to Makefile.inc but since Makefile.inc is included + from Makefile for all other build systems that does not work. + + This commit instead adds Makefile.OpenSSL.inc and Makefile.libgcrypt.inc + and moves the automake conditional to its proper place, src/Makefile.am. + + The automake conditional includes the correct Makefile.$name.inc per + the crypto library selection/detection done by configure. + + All non-autotools build system files in libssh2 are hardcoded to use + OpenSSL and do not get a conditional but at least there is some reuse + because they can all include the new Makefile.OpenSSL.inc. + +Daniel Stenberg (27 Oct 2013) +- [Salvador Fandino brought this change] + + Set default window size to 2MB + + The default channel window size used until now was 256KB. This value is + too small and results on a bottleneck on real-life networks where + round-trip delays can easily reach 300ms. + + The issue was not visible because the configured channel window size + was being ignored and a hard-coded value of ~22MB being used instead, + but that was fixed on a previous commit. + + This patch just changes the default window size + (LIBSSH2_CHANNEL_WINDOW_DEFAULT) to 2MB. It is the same value used by + OpenSSH and in our opinion represents a good compromise between memory + used and transfer speed. + + Performance tests were run to determine the optimum value. 
The details + and related discussion are available from the following thread on the + libssh2 mailing-list: + + http://www.libssh2.org/mail/libssh2-devel-archive-2013-10/0018.shtml + http://article.gmane.org/gmane.network.ssh.libssh2.devel/6543 + + An excerpt follows: + + "I have been running some transfer test and measuring their speed. + + My setup was composed of a quad-core Linux machine running Ubuntu 13.10 + x86_64 with a LXC container inside. The data transfers were performed + from the container to the host (never crossing through a physical + network device). + + Network delays were simulated using the tc tool. And ping was used to + verify that they worked as intended during the tests. + + The operation performed was the equivalent to the following ssh command: + + $ ssh container "dd bs=16K count=8K if=/dev/zero" >/dev/null + + Though, establishment and closing of the SSH connection was excluded + from the timings. + + I run the tests several times transferring files of sizes up to 128MB + and the results were consistent between runs. + + The results corresponding to the 128MB transfer are available here: + + https://docs.google.com/spreadsheet/ccc?key=0Ao1yRmX6PQQzdG5wSFlrZl9HRWNET3ZyN0hnaGo5ZFE&usp=sharing + + It clearly shows that 256KB is too small as the default window size. + Moving to a 512MB generates a great improvement and after the 1MB mark + the returns rapidly diminish. Other factors (TCP window size, probably) + become more limiting than the channel window size + + For comparison I also performed the same transfers using OpenSSH. Its + speed is usually on par with that of libssh2 using a window size of 1MB + (even if it uses a 2MB window, maybe it is less aggressive sending the + window adjust msgs)." + + Signed-off-by: Salvador Fandino + +- [Salvador brought this change] + + _libssh2_channel_read: Honour window_size_initial + + _libssh2_channel_read was using an arbitrary hard-coded limit to trigger + the window adjusting code. 
The adjustment used was also hard-coded and + arbitrary, 15MB actually, which would limit the usability of libssh2 on + systems with little RAM. + + This patch, uses the window_size parameter passed to + libssh2_channel_open_ex (stored as remote.window_size_initial) plus the + buflen as the base for the trigger and the adjustment calculation. + + The memory usage when using the default window size is reduced from 22MB + to 256KB per channel (actually, if compression is used, these numbers + should be incremented by ~50% to account for the errors between the + decompressed packet sizes and the predicted sizes). + + My tests indicate that this change does not impact the performance of + transfers across localhost or a LAN, being it on par with that of + OpenSSH. On the other hand, it will probably slow down transfers on + networks with high bandwidth*delay when the default window size + (LIBSSH2_CHANNEL_WINDOW_DEFAULT=256KB) is used. + + Signed-off-by: Salvador Fandino + +- [Salvador Fandino brought this change] + + knownhosts: handle unknown key types + + Store but don't use keys of unsupported types on the known_hosts file. + + Currently, when libssh2 parses a known_host file containing keys of some + type it doesn't natively support, it stops reading the file and returns + an error. + + That means, that the known_host file can not be safely shared with other + software supporting other key types (i.e. OpenSSH). + + This patch adds support for handling keys of unknown type. It can read + and write them, even if they are never going to be matched. 
+ + At the source level the patch does the following things: + + - add a new unknown key type LIBSSH2_KNOWNHOST_KEY_UNKNOWN + + - add a new slot (key_type_name) on the known_host struct that is + used to store the key type in ascii form when it is not supported + + - parse correctly known_hosts entries with unknown key types and + populate the key_type_name slot + + - print correctly known_hosts entries of unknown type + + - when checking a host key ignore keys that do not match the key + + Fixes #276 + +- windows build: fix build errors + + Fixes various link errors with VS2010 + + Reported-by: "kdekker" + Fixes #272 + +- man page: add missing function argument + + for libssh2_userauth_publickey_fromfile_ex() + + Reported-by: "pastey" + + Fixes #262 + +- [Salvador brought this change] + + Fix zlib deflate usage + + Deflate may return Z_OK even when not all data has been compressed + if the output buffer becomes full. + + In practice this is very unlikely to happen because the output buffer + size is always some KBs larger than the size of the data passed for + compression from the upper layers and I think that zlib never expands + the data so much, even on the worst cases. + + Anyway, this patch plays on the safe side checking that the output + buffer is not exhausted. + + Signed-off-by: Salvador + +- [Salvador brought this change] + + comp_method_zlib_decomp: Improve buffer growing algorithm + + The old algorithm was O(N^2), causing lots and lots of reallocations + when highly compressed data was transferred. + + This patch implements a simpler one that just doubles the buffer size + everytime it is exhausted. It results in O(N) complexity. + + Also a smaller inflate ratio is used to calculate the initial size (x4). + + Signed-off-by: Salvador + +- [Salvador brought this change] + + Fix zlib usage + + Data may remain in zlib internal buffers when inflate() returns Z_OK + and avail_out == 0. In that case, inflate has to be called again. 
+ + Also, once all the data has been inflated, it returns Z_BUF_ERROR to + signal that the input buffer has been exhausted. + + Until now, the way to detect that a packet payload had been completely + decompressed was to check that no data remained on the input buffer + but that didn't account for the case where data remained on the internal + zlib buffers. + + That resulted in packets not being completely decompressed and the + missing data reappearing on the next packet, though the bug was masked + by the buffer allocation algorithm most of the time and only manifested + when transferring highly compressible data. + + This patch fixes the zlib usage. + + Signed-off-by: Salvador + +- [Salvador brought this change] + + _libssh2_channel_read: fix data drop when out of window + + After filling the read buffer with data from the read queue, when the + window size was too small, "libssh2_channel_receive_window_adjust" was + called to increase it. In non-blocking mode that function could return + EAGAIN and, in that case, the EAGAIN was propagated upwards and the data + already read on the buffer lost. + + The function was also moving between the two read states + "libssh2_NB_state_idle" and "libssh2_NB_state_created" both of which + behave in the same way (excepting a debug statment). + + This commit modifies "_libssh2_channel_read" so that the + "libssh2_channel_receive_window_adjust" call is performed first (when + required) and if everything goes well, then it reads the data from the + queued packets into the read buffer. + + It also removes the useless "libssh2_NB_state_created" read state. + + Some rotted comments have also been updated. + + Signed-off-by: Salvador + +- [Salvador Fandino brought this change] + + window_size: redid window handling for flow control reasons + + Until now, the window size (channel->remote.window_size) was being + updated just after receiving the packet from the transport layer. 
+ + That behaviour is wrong because the channel queue may grow uncontrolled + when data arrives from the network faster that the upper layer consumes + it. + + This patch adds a new counter, read_avail, which keeps a count of the + bytes available from the packet queue for reading. Also, now the window + size is adjusted when the data is actually read by an upper layer. + + That way, if the upper layer stops reading data, the window will + eventually fill and the remote host will stop sending data. When the + upper layers reads enough data, a window adjust packet is delivered and + the transfer resumes. + + The read_avail counter is used to detect the situation when the remote + server tries to send data surpassing the window size. In that case, the + extra data is discarded. + + Signed-off-by: Salvador + +Peter Stuge (15 Sep 2013) +- configure.ac: Call zlib zlib and not libz in text but keep option names + +- configure.ac: Reorder --with-* options in --help output + +- configure.ac: Rework crypto library detection + + This further simplifies adding new crypto libraries. + +- Clean up crypto library abstraction in build system and source code + + libssh2 used to explicitly check for libgcrypt and default to OpenSSL. + + Now all possible crypto libraries are checked for explicitly, making + the addition of further crypto libraries both simpler and cleaner. + +- configure.ac: Add zlib to Requires.private in libssh2.pc if using zlib + +- Revert "Added Windows Cryptography API: Next Generation based backend" + + This reverts commit d385230e15715e67796f16f3e65fd899f21a638b. + +Daniel Stenberg (7 Sep 2013) +- [Leif Salomonsson brought this change] + + sftp_statvfs: fix for servers not supporting statfvs extension + + Fixes issue arising when server does not support statfvs and or fstatvfs + extensions. sftp_statvfs() and sftp_fstatvfs() after this patch will + handle the case when SSH_FXP_STATUS is returned from server. 
+ +- [Marc Hoersken brought this change] + + Added Windows Cryptography API: Next Generation based backend + +- [Kamil Dudka brought this change] + + partially revert "window_size: explicit adjustments only" + + This partially reverts commit 03ca9020756a4e16f0294e5b35e9826ee6af2364 + in order to fix extreme slowdown when uploading to localhost via SFTP. + + I was able to repeat the issue on RHEL-7 on localhost only. It did not + occur when uploading via network and it did not occur on a RHEL-6 box + with the same version of libssh2. + + The problem was that sftp_read() used a read-ahead logic to figure out + the window_size, but sftp_packet_read() called indirectly from + sftp_write() did not use any read-ahead logic. + +- _libssh2_channel_write: client spins on write when window full + + When there's no window to "write to", there's no point in waiting for + the socket to become writable since it most likely just will continue to + be. + + Patch-by: ncm + Fixes #258 + +- _libssh2_channel_forward_cancel: avoid memory leaks on error + + Fixes #257 + +- _libssh2_packet_add: avoid using uninitialized memory + + In _libssh2_packet_add, called by _libssh2_packet_read, a call to + _libssh2_packet_send that is supposed to send a one-byte message + SSH_MSG_REQUEST_FAILURE would send an uninitialized byte upon re-entry + if its call to _send returns _EAGAIN. + + Fixes #259 + +- _libssh2_channel_forward_cancel: accessed struct after free + + ... and the assignment was pointless anyway since the struct was about + to be freed. Bug introduced in dde2b094. 
+ + Fixes #268 + +Peter Stuge (2 Jun 2013) +- [Marc Hoersken brought this change] + + Fixed compilation using mingw-w64 + +- [Marc Hoersken brought this change] + + knownhost.c: use LIBSSH2_FREE macro instead of free + + Use LIBSSH2_FREE instead of free since + _libssh2_base64_encode uses LIBSSH2_ALLOC + +Daniel Stenberg (18 May 2013) +- [Matthias Kerestesch brought this change] + + libssh2_agent_init: init ->fd to LIBSSH2_INVALID_SOCKET + + ... previously it was left at 0 which is a valid file descriptor! + + Bug: https://trac.libssh2.org/ticket/265 + + Fixes #265 + +- userauth_password: pass on the underlying error code + + _libssh2_packet_requirev() may return different errors and we pass that + to the parent instead of rewriting it. + + Bug: http://libssh2.org/mail/libssh2-devel-archive-2013-04/0029.shtml + Reported by: Cosmin + +Peter Stuge (9 May 2013) +- [Marc Hoersken brought this change] + + libcrypt.c: Fix typo in _libssh2_rsa_sha1_sign() parameter type + +Kamil Dudka (4 May 2013) +- configure.ac: replace AM_CONFIG_HEADER with AC_CONFIG_HEADERS + + Reported by: Quintus + Bug: https://trac.libssh2.org/ticket/261 + +Guenter Knauf (12 Apr 2013) +- Fixed copyright string for NetWare build. + +Daniel Stenberg (9 Apr 2013) +- [Richard W.M. Jones brought this change] + + sftp: Add support for fsync (OpenSSH extension). + + The new libssh2_sftp_fsync API causes data and metadata in the + currently open file to be committed to disk at the server. + + This is an OpenSSH extension to the SFTP protocol. See: + + https://bugzilla.mindrot.org/show_bug.cgi?id=1798 + +- [Richard W.M. Jones brought this change] + + sftp: statvfs: Along error path, reset the correct 'state' variable. + +- [Richard W.M. Jones brought this change] + + sftp: seek: Don't flush buffers on same offset + + Signed-off-by: Richard W.M. Jones + +Guenter Knauf (9 Feb 2013) +- Updated dependency libs. + +- Fixed tool macro names. 
+
+Daniel Stenberg (29 Nov 2012)
+- [Seth Willits brought this change]
+
+ compiler warnings: typecast strlen in macros
+
+ ... in macro parameters to avoid compiler warnings about lost precision.
+
+ Several macros in libssh2.h call strlen and pass the result directly to
+ unsigned int parameters of other functions, which warns about precision
+ loss because strlen returns size_t which is unsigned long on at least
+ some platforms (such as OS X). The fix is to simply typecast the
+ strlen() result to unsigned int.
+
+- libssh2.h: bump version to 1.4.4-DEV
+
+Version 1.4.3 (27 Nov 2012)
+
+Daniel Stenberg (27 Nov 2012)
+- RELEASE-NOTES: fixed for 1.4.3
+
+- sftp_read: return error if a too large package arrives
+
+Peter Stuge (13 Nov 2012)
+- Only define _libssh2_dsa_*() functions when building with DSA support
+
+Guenter Knauf (8 Nov 2012)
+- Added .def file to output.
+
+Kamil Dudka (1 Nov 2012)
+- libssh2_hostkey_hash.3: update the description of return value
+
+ The function returns NULL also if the hash algorithm is not available.
+
+Guenter Knauf (24 Oct 2012)
+- Fixed mode accidentally committed.
+
+- Ignore generated file.
+
+- Added hack to make use of Makefile.inc.
+
+ This should avoid further maintenance of the objects list.
+
+- Fixed MSVC NMakefile.
+
+ Added missing source files; added resource for DLL.
+
+Kamil Dudka (22 Oct 2012)
+- examples: use stderr for messages, stdout for data
+
+ Reported by: Karel Srot
+ Bug: https://bugzilla.redhat.com/867462
+
+- openssl: do not leak memory when handling errors
+
+ ... in aes_ctr_init(). Detected by Coverity.
+
+- channel: fix possible NULL dereference
+
+ ... in libssh2_channel_get_exit_signal(). Detected by Coverity.
+
+- Revert "aes: the init function fails when OpenSSL has AES support"
+
+ This partially reverts commit f4f2298ef3635acd031cc2ee0e71026cdcda5864.
+
+ We need to use the EVP_aes_???_ctr() functions in FIPS mode.
+
+- crypt: use hard-wired cipher block sizes consistently
+
+- openssl: do not ignore failure of EVP_CipherInit()
+
+- kex: do not ignore failure of libssh2_md5_init()
+
+ The MD5 algorithm is disabled when running in FIPS mode.
+
+Daniel Stenberg (21 Aug 2012)
+- [Peter Krempa brought this change]
+
+ known_hosts: Fail when parsing unknown keys in known_hosts file.
+
+ libssh2_knownhost_readfile() silently ignored problems when reading keys
+ in unsupported formats from the known hosts file. When the file is
+ written again from the internal structures of libssh2 it gets truncated
+ to the point where the first unknown key was located.
+
+ * src/knownhost.c:libssh2_knownhost_readfile() - return error if key
+ parsing fails
+
+- AUTHORS: synced with 42fec44c8a4
+
+ 31 recent authors added
+
+- [Dave Hayden brought this change]
+
+ compression: add support for zlib@openssh.com
+
+ Add a "use_in_auth" flag to the LIBSSH2_COMP_METHOD struct and a
+ separate "zlib@openssh.com" method, along with checking session->state
+ for LIBSSH2_STATE_AUTHENTICATED. Appears to work on the OpenSSH servers
+ I've tried against, and it should work as before with normal zlib
+ compression.
+
+- [Dmitry Smirnov brought this change]
+
+ configure: gcrypt doesn't come with pkg-config support
+
+ ... so use plain old -lgcrypt to the linker to link with it.
+
+ Fixes #225
+
+- sftp_read: Value stored to 'next' is never read
+
+ Detected by clang-analyzer
+
+- publickey_init: errors are negative, fix check
+
+ Detected by clang-analyzer.
+
+- [Maxime Larocque brought this change]
+
+ session_free: wrong variable used for keeping state
+
+ If libssh2_session_free is called without the channel being freed
+ previously by libssh2_channel_free a memory leak could occur.
+
+ A mismatch of state variables in session_free() prevents the call to
+ libssh2_channel_free function. session->state member is used instead of
+ session->free_state.
+ + It causes a leak of around 600 bytes on every connection on my systems + (Linux, x64 and PPC). + + (Debugging done under contract for Accedian Networks) + + Fixes #246 + +Guenter Knauf (29 Jun 2012) +- Small NetWare makefile tweak. + +- Some small Win32 makefile fixes. + +Daniel Stenberg (19 Jun 2012) +- libssh2_userauth_publickey_fromfile_ex.3: mention publickey == NULL + +- comp_method_zlib_decomp: handle Z_BUF_ERROR when inflating + + When using libssh2 to perform an SFTP file transfer from the "JSCAPE MFT + Server" (http://www.jscape.com) the transfer failed. The default JSCAPE + configuration is to enforce zlib compression on SSH2 sessions so the + session was compressed. The relevant part of the debug trace contained: + + [libssh2] 1.052750 Transport: unhandled zlib error -5 + [libssh2] 1.052750 Failure Event: -29 - decompression failure + + The trace comes from comp_method_zlib_decomp() in comp.c. The "unhandled + zlib error -5" is the status returned from the zlib function + inflate(). The -5 status corresponds to "Z_BUF_ERROR". + + The inflate() function takes a pointer to a z_stream structure and + "inflates" (decompresses) as much as it can. The relevant fields of the + z_stream structure are: + + next_in - pointer to the input buffer containing compressed data + avail_in - the number of bytes available at next_in + next_out - pointer to the output buffer to be filled with uncompressed + data + avail_out - how much space available at next_out + + To decompress data you set up a z_stream struct with the relevant fields + filled in and pass it to inflate(). On return the fields will have been + updated so next_in and avail_in show how much compressed data is yet to + be processed and next_out and avail_out show how much space is left in + the output buffer. + + If the supplied output buffer is too small then on return there will be + compressed data yet to be processed (avail_in != 0) and inflate() will + return Z_OK. 
In this case the output buffer must be grown, avail_out + updated and inflate() called again. + + If the supplied output buffer was big enough then on return the + compressed data will have been exhausted (avail_in == 0) and inflate() + will return Z_OK, so the data has all been uncompressed. + + There is a corner case where inflate() makes no progress. That is, there + may be unprocessed compressed data and space available in the output + buffer and yet the function does nothing. In this case inflate() will + return Z_BUF_ERROR. From the zlib documentation and the source code it + is not clear under what circumstances this happens. It could be that it + needs to write multiple bytes (all in one go) from its internal state to + the output buffer before processing the next chunk of input but but + can't because there is not enough space (though my guesses as to the + cause are not really relevant). Recovery from Z_BUF_ERROR is pretty + simple - just grow the output buffer, update avail_out and call + inflate() again. + + The comp_method_zlib_decomp() function does not handle the case when + inflate() returns Z_BUF_ERROR. It treats it as a non-recoverable error + and basically aborts the session. + + Fixes #240 + +Guenter Knauf (12 Jun 2012) +- MinGW makefile tweaks. + + Use GNU tools when compiling on Linux. + Fixed dist and dev targets. + +- NetWare makefile tweaks. + + Changed to use Windows commandline tools instead of + GNU tools when compiling on Windows. Fixed dist and + dev targets. Enabled nlmconv error for unresolved + symbols. + +Daniel Stenberg (11 Jun 2012) +- Revert "config.rpath: generated file, no need to keep in git" + + This reverts commit 1ac7bd09cc685755577fb2c8829adcd081e7ab3c. + + This file still used by lib/*m4 functions so we need to keep the file + around. + +- BINDINGS: added PySsh2, a Python-ctypes binding + +Guenter Knauf (8 Jun 2012) +- Fixed MinGW debug build. 
+
+Daniel Stenberg (5 Jun 2012)
+- BINDINGS: Added the Cocoa/Objective-C one
+
+ ... and sorted the bindings after the languages, alphabetically
+
+ Reported by: Mike Abdullah
+
+- BINDINGS: document the bindings we know of
+
+Guenter Knauf (4 Jun 2012)
+- Fixed LIBSSH2_INT64_T_FORMAT macro.
+
+ Usually a format macro should hold the whole format, otherwise
+ it should be named a prefix. Also fixed usage of this macro in
+ scp.c for a signed var where it was used as prefix for unsigned.
+
+- Removed obsolete define from makefiles.
+
+- Renamed NetWare makefiles.
+
+- Renamed NetWare makefiles.
+
+- Synced MinGW makefiles with 56c64a6..39e438f.
+
+ Also synced MinGW test makefile with b092696..f8cb874.
+
+Peter Stuge (30 May 2012)
+- Revert "sftp: Don't send attrs.permissions on read-only SSH_FXP_OPEN"
+
+ This reverts commit 04e79e0c798674a0796be8a55f63dd92e6877790.
+
+- sftp: Don't send attrs.permissions on read-only SSH_FXP_OPEN
+
+ This works around a protocol violation in the ProFTPD 1.3.4 mod_sftp
+ server, as reported by Will Cosgrove in:
+
+ http://libssh2.org/mail/libssh2-devel-archive-2012-05/0079.shtml
+
+ Based on a suggested fix by TJ Saunders in:
+
+ http://libssh2.org/mail/libssh2-devel-archive-2012-05/0104.shtml
+
+Guenter Knauf (28 May 2012)
+- Try to detect OpenSSL build type automatically.
+
+ Also fixed recently added libgdi32 linkage which is only
+ required when OpenSSL libs are linked statically.
+
+Daniel Stenberg (25 May 2012)
+- config.rpath: generated file, no need to keep in git
+
+Guenter Knauf (22 May 2012)
+- Updated dependency library versions.
+
+Daniel Stenberg (18 May 2012)
+- 1.4.3: towards the future
+
+Version 1.4.2 (18 May 2012)
+
+Daniel Stenberg (18 May 2012)
+- RELEASE-NOTES: synced with 92a9f952794
+
+Alexander Lamaison (15 May 2012)
+- win32/libssh2_config.h: Remove hardcoded #define LIBSSH2_HAVE_ZLIB.
+ + Rationale: Everything else in this file states a fact about the win32 + platform that is unconditional for that platform. There is nothing + unconditional about the presence of zlib. It is neither included with + Windows nor with the platform SDK. Therefore, this is not an appropriate + place to assert its presence. Especially as, once asserted, it cannot be + overridden using a compiler flag. + + In contrast, if it is omitted, then it can easily be reasserted by adding + a compiler flag defining LIBSSH2_HAVE_ZLIB. + +Daniel Stenberg (14 May 2012) +- RELEASE-NOTES: synced with 69a3354467c + +- _libssh2_packet_add: SSH_MSG_CHANNEL_REQUEST default to want_reply + + RFC4254 says the default 'want_reply' is TRUE but the code defaulted to + FALSE. Now changed. + + Fixes #233 + +- gettimeofday: no need for a replacement under cygwin + + Fixes #224 + +Alexander Lamaison (13 May 2012) +- Prevent sftp_packet_read accessing freed memory. + + sftp_packet_add takes ownership of the packet passed to it and (now that we + handle zombies) might free the packet. sftp_packet_read uses the packet type + byte as its return code but by this point sftp_packet_add might have freed + it. This change fixes the problem by caching the packet type before calling + sftp_packet_add. + + I don't understand why sftp_packet_read uses the packet type as its return + code. A future change might get rid of this entirely. + +Daniel Stenberg (12 May 2012) +- sftp_packet_flush: flush zombies too + + As this function is called when the SFTP session is closed, it needs to + also kill all zombies left in the SFTP session to avoid leaking memory + just in case some zombie would still be in there. + +- sftp_packetlist_flush: zombies must not have responses already + + When flushing the packetlist, we must only add the request as a zombie + if no response has already been received. 
Otherwise we could wrongly + make it a zombie even though the response was already received and then + we'd get a zombie stuck there "forever"... + +- sftp_read: on EOF remove packet before flush + + Since the sftp_packetlist_flush() function will move all the existing + FXP_READ requests in this handle to the zombie list we must first remove + this just received packet as it is clearly not a zombie. + +- sftp_packet_require: sftp_packet_read() returning 0 is not an error + + Exactly as the comment in the code said, checking the return code from + sftp_packet_read() with <= was wrong and it should be < 0. With the new + filtering on incoming packets that are "zombies" we can now see this + getting zero returned. + +- sftp_packetlist_flush: only make it zombie if it was sent + + The list of outgoing packets may also contain packets that never were + sent off and we better not make them zombies too. + +- [Alexander Lamaison brought this change] + + Mark outstanding read requests after EOF as zombies. + + In order to be fast, sftp_read sends many read requests at once. With a small + file, this can mean that when EOF is received back, many of these requests are + still outstanding. Responses arriving after we close the file and abandon the + file handle are queued in the SFTP packet queue and never collected. This + causes transfer speed to drop as a progressively longer queue must be searched + for every packet. + + This change introduces a zombie request-ID list in the SFTP session that is + used to recognise these outstanding requests and prevent them being added to + the queue. -- SFTP: fix memory leaks +Peter Stuge (23 Apr 2012) +- [Rafael Kitover brought this change] + + Update win32/GNUmakefile to use OpenSSL 1.0.1a - Make sure that we cleanup remainders when the handle is closed and when - the subsystem is shutdown. 
+ libcrypto on win32 now depends on gdi32.dll, so move the OpenSSL LDLIBS + block to before the compiler definitions, so that libcrypto gets added + first, and then add -lgdi32 into the following common LDLIBS for gcc. + +Guenter Knauf (23 Apr 2012) +- Changed 'Requires' to 'Requires.private'. - Existing flaw: if a single handle sends packets that haven't been - replied to yet at the time when the handle is closed, those packets will - arrive later and end up in the generic packet brigade queue and they - will remain in there until flushed. They will use unnecessary memory, - make things slower and they will ruin the SFTP handling if the - request_id counter ever wraps (highly unlikely to every happen). + Only static builds need to link against the crypto libs. -- sftp_close_handle: packet list is generic +- Fixed 'Requires:' names. - Fix comment, simplify the loop logic + The 'Requires:' line lists the names of the .pc files. -- sftp_read: pipeline reads +- Added 'Requires:' line to libssh2.pc. - The SFTP read function now does transfers the same way the SFTP write - function was made to recently: it creates a list of many outgoing - FXP_READ packets that each asks for a small data chunk. The code then - tries to keep sending read request while collecting the acks for the - previous requests and returns the received data. + This is necessary so that other libs which lookup libssh2 info + via pkg-config can add the right crypto lib dependencies. -- sftp_write: removed unused variable +- Updated dependency lib versions. 
-- _libssh2_channel_close: don't call transport read if disconnected +Peter Stuge (18 Apr 2012) +- configure.ac: Add option to disable build of the example applications - The loop that waits for remote.close to get set may end up looping - forever since session->socket_state gets set to - LIBSSH2_SOCKET_DISCONNECTED by the packet_add() function called from the - transport_read() function and after having been set to - LIBSSH2_SOCKET_DISCONNECTED, the transport_read() function will only - return 0. + Examples are built by default. Any of the following options on the + configure command line will skip building them: - Bug: http://trac.libssh2.org/ticket/198 + --disable-examples-build + --enable-examples-build=no + --enable-examples-build=false -- libssh2_sftp_seek64: new man page +- userauth.c: fread() from public key file to correctly detect any errors - Split off libssh2_sftp_seek64 from the libssh2_sftp_seek man page, and - mentioned that we consider the latter deprecated. Also added a mention - about the dangers of doing seek during writing or reading. + If the filename parameter for file_read_publickey() was the name of a + directory instead of a file then libssh2 would spin trying to fgetc() + from the FILE * for the opened directory when trying to determine the + length of the encoded public key, since fgetc() can't report errors. + + Use fread() instead to correctly detect this error condition along + with many others. + + This fixes the problem reported in + http://www.libssh2.org/mail/libssh2-devel-archive-2012-04/0021.shtml + + Reported-by: Oleksiy Zagorskyi -- sftp_seek: fix +- Return LIBSSH2_ERROR_SOCKET_DISCONNECT on EOF when reading banner + +Guenter Knauf (17 Apr 2012) +- Fixed copyright year. + +- Updated dependency lib versions in static makefiles. + +Daniel Stenberg (6 Apr 2012) +- version: bump to 1.4.2 - The new SFTP write code caused a regression as the seek function no - longer worked as it didn't set the write position properly. 
+ We're on the 1.4.2 track now (at least) + +Version 1.4.1 (4 Apr 2012) + +Daniel Stenberg (4 Apr 2012) +- RELEASE-NOTES: updated for 1.4.1 release + +- always do "forced" window updates - It should be noted that seeking is STRONGLY PROHIBITED during upload, as - the upload magic uses two different offset positions and the multiple - outstanding packets etc make them sensitive to change in the midst of - operations. + When calling _libssh2_channel_receive_window_adjust() internally, we now + always use the 'force' option to prevent libssh2 to avoid sending the + update if the update isn't big enough. - This functionality was just verified with the new example code - sftp_append. This bug was filed as bug #202: + It isn't fully analyzed but we have seen corner cases which made a + necessary window update not get send due to this and then the other side + doesn't send data our side then sits waiting for forever. + +- channel_read: force window adjusts! - Bug: http://trac.libssh2.org/ticket/202 + if there's not enough room to receive the data that's being requested, + the window adjustment needs to be sent to the remote and thus the force + option has to be used. _libssh2_channel_receive_window_adjust() would + otherwise "queue" small window adjustments for a later packet but that + is really terribly for the small buffer read that for example is the + final little piece of a very large file as then there is no logical next + packet! + + Reported by: Armen Babakhanian + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0130.shtml -- sftp_append: new example doing SFTP append +- [Paul Howarth brought this change] -- MAX_SFTP_OUTGOING_SIZE: 30000 + aes: the init function fails when OpenSSL has AES support - I ran SFTP upload tests against localhost. It showed that to make the - app reach really good speeds, I needed to do a little code tweak and - change MAX_SFTP_OUTGOING_SIZE from 4000 to 30000. 
The tests I did before - with the high latency tests didn't show any real difference whatever I - had that size set to. + The internal init function only worked fine when the configure script + didn't detect the OpenSSL AES_CTR function! - This number is the size in bytes that libssh2 cuts off the large input - buffer and sends off as an individual sftp packet. + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0111.shtml + Reported by: Paul Howarth -- sftp_write_sliding.c: new example +- [Matthew Booth brought this change] + + transport_send: Finish in-progress key exchange before sending data - This is an example that is very similar to sftp_write_nonblock.c, with - the exception that this uses + _libssh2_channel_write() first reads outstanding packets before writing + new data. If it reads a key exchange request, it will immediately start + key re-exchange, which will require sending a response. If the output + socket is full, this will result in a return from + _libssh2_transport_read() of LIBSSH2_ERROR_EAGAIN. In order not to block + a write because there is no data to read, this error is explicitly + ignored and the code continues marshalling a packet for sending. When it + is sent, the remote end immediately drops the connection because it was + expecting a continuation of the key exchange, but got a data packet. - 1 - a larger upload buffer + This change adds the same check for key exchange to + _libssh2_transport_send() that is in _libssh2_transport_read(). This + ensures that key exchange is completed before any data packet is sent. + +- channel_write: acknowledge transport errors - 2 - a sliding buffer mechnism to allow the app to keep sending lots of - data to libssh2 without having to first drain the buffer. 
+ When draining data off the socket with _libssh2_transport_read() (which + in turn has to be done so that we can be sure to have read any possible + window-increasing packets), this code previously ignored errors which + could lead to nasty loops. Now all error codes except EAGAIN will cause + the error to be returned at once. - These are two key issues to make libssh2 SFTP uploads really perform - well at this point in time. + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0068.shtml + Reported by: Matthew Booth -- cpp: s/#elsif/#elif +- [Steven Dake brought this change] + + In examples/x11.c, Make sure sizeof passed to read operation is correct - This looks like a typo as #elsif is not really C... + sizeof(buf) expands to 8 or 4 (since its a pointer). This variable may + have been static in the past, leading to this error. + + Signed-off-by: Steven Dake -- _libssh2_channel_write: revert channel_write() use +- [Steven Dake brought this change] + + Fix suspicious sizeof usage in examples/x11.c - The attempts made to have _libssh2_channel_write() accept larger pieces - of data and split up the data by itself into 32700 byte chunks and pass - them on to channel_write() in a loop as a way to do faster operations on - larger data blocks was a failed attempt. + In the x11 example, sizeof(buf) = 8UL (on x86_64), when this should + probably represent the buffer size available. I am not sure how to + test that this change is actually correct, however. - The reason why it is difficult: + Signed-off-by: Steven Dake + +- sftp_packet_read: follow-up fix for EAGAIN/window adjust - The API only allows EAGAIN or a length to be returned. When looping over - multiple blocks to get sent, one block can get sent and the next might - not. And yet: when transport_send() has returned EAGAIN we must not call - it again with new data until it has returned OK on the existing data it - is still working on. 
This makes it a mess and we do get a much easier - job by simply returning the bytes or EAGAIN at once, as in the EAGAIN - case we can assume that we will be called with the same arguments again - and transport_send() will be happy. + The commit in 7194a9bd7ba45 wasn't complete. This change makes sure + variables are initialized properly before used in the EAGAIN and window + adjust cases. + +- sftp_packet_add: use named error code instead of number + +- sftp_packet_add: verify the packet before accepting it - Unfortunately, I think we take a small performance hit by not being able - to do this. + In order to bail out as quickly as possible when things are wrong and + out of sync, make sure the SFTP message is one we understand. -- ssh2_echo: new example +- SFTP: preserve the original error code more + + Lots of places in the code translated the original error into the more + generic LIBSSH2_ERROR_SOCKET_TIMEOUT but this turns out to distort the + original error reason a lot and makes tracking down the real origin of a + problem really hard. This change makes the original error code be + preserved to a larger extent when return up to the parent function. + +- sftp_packet_read: adjust window size as necessary + + Commit 03ca9020756 tried to simplify the window sizing logic but broke + SFTP readdir as there was no window sizing code left there so large + directory listings no longer worked. + + This change introduces window sizing logic to the sftp_packet_read() + function so that it now tells the remote about the local size having a + window size that suffice when it is about to ask for directory data. + + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2012-03/0069.shtml + Reported by: Eric + +- [Steven Dake brought this change] + + Tell C compiler we don't care about return code of libssh2_init + + The call of libssh2_init returns a return code, but nothing could be done + within the _libssh2_init_if_needed execution path. 
+ + Signed-off-by: Steven Dake + +- [Steven Dake brought this change] + + Add comment indicating a resource leak is not really a resource leak + + While possibly obvious to those investigating the code, coverity complains + about this out of scope leak. + + Signed-off-by: Steven Dake + +- [Steven Dake brought this change] + + Use safer snprintf rather then sprintf in scp_send() - This is a new example snippet. The code is largely based on ssh2_exec, - and is written by Tommy Lindgren. I edited it into C90 compliance and to - conform to libssh2 indent style and some more. + Signed-off-by: Steven Dake -- send_existing: return after send_existing +- [Steven Dake brought this change] + + Use safer snprintf rather then sprintf in scp_recv() - When a piece of data is sent from the send_existing() function we must - make the parent function return afterwards. Otherwise we risk that the - parent function tries to send more data and ends up getting an EGAIN for - that more data and since it can only return one return code it doesn't - return info for the successfully sent data. + While the buffer is indeed allocated to a safe length, better safe then sorry. - As this change is a regression I now added a larger comment explaining - why it has to work like this. + Signed-off-by: Steven Dake -- _libssh2_channel_write: count resent data as written - - In the logic that resends data that was kept for that purpose due to a - previous EAGAIN, the data was not counted as sent causing badness. +- [Steven Dake brought this change] -Peter Stuge (13 Nov 2010) -- Use fprintf(stderr, ) instead of write(2, ) for debugging + use snprintf in knownhost_writeline() rather then sprintf + + Although the function checks the length, if the code was in error, there + could potentially be a buffer overrun with the use of sprintf. Instead replace + with snprintf. 
+ + Signed-off-by: Steven Dake -- session/transport: Correctly handle when _libssh2_send() returns -EAGAIN +- [Steven Dake brought this change] -- src/agent.c: Simplify _libssh2_send() error checking ever so slightly + Add tracing to print packets left on session at libssh2_session_free + + Signed-off-by: Steven Dake -Daniel Stenberg (12 Nov 2010) -- send/recv: use _libssh2_recv and _libssh2_send now +Peter Stuge (2 Mar 2012) +- Define and use LIBSSH2_INVALID_SOCKET instead of INVALID_SOCKET - Starting now, we unconditionally use the internal replacement functions - for send() and recv() - creatively named _libssh2_recv() and - _libssh2_send(). + INVALID_SOCKET is a special value in Windows representing a + non-valid socket identifier. We were #defining this to -1 on + non-Windows platforms, causing unneccessary namespace pollution. + Let's have our own identifier instead. - On errors, these functions return the negative 'errno' value instead of - the traditional -1. This design allows systems that have no "natural" - errno support to not have to invent it. It also means that no code - outside of these two transfer functions should use the errno variable. + Thanks to Matt Lawson for pointing this out. -- channel_write: move some logic to _libssh2_channel_write - - Some checks are better done in _libssh2_channel_write just once per - write instead of in channel_write() since the looping will call the - latter function multiple times per _libssh2_channel_write() invoke. +- nw/Makefile.netware: Fix project name typo to avoid needless confusion -- sftp_write: handle "left over" acked data +- example/x11: Set raw terminal mode manually instead of with cfmakeraw() - The SFTP handle struct now buffers number of acked bytes that haven't - yet been returned. The way this is used is as following: + OpenSolaris has no cfmakeraw() so to make the example more portable + we simply do the equivalent operations on struct termios ourselves. - 1. 
sftp_write() gets called with a buffer of let say size 32000. We - split 32000 into 8 smaller packets and send them off one by one. One of - them gets acked before the function returns so 4000 is returned. + Thanks to Tom Weber for reporting this problem, and finding a solution. + +Daniel Stenberg (17 Feb 2012) +- sftp_write: cannot return acked data *and* EAGAIN - 2. sftp_write() gets called again a short while after the previous one, - now with a much smaller size passed in to the function. Lets say 8000. - In the mean-time, all of the remaining packets from the previous call - have been acked (7*4000 = 28000). This function then returns 8000 as all - data passed in are already sent and it can't return any more than what - it got passed in. But we have 28000 bytes acked. We now store the - remaining 20000 in the handle->u.file.acked struct field to add up in - the next call. + Whenever we have acked data and is about to call a function that *MAY* + return EAGAIN we must return the number now and wait to get called + again. Our API only allows data *or* EAGAIN and we must never try to get + both. + +Peter Stuge (13 Feb 2012) +- example/x11: Build only when sys/un.h is found by configure - 3. sftp_write() gets called again, and now there's a backlogged 20000 - bytes to return as fine and that will get skipped from the beginning - of the buffer that is passed in. + The example can't be built on systems without AF_UNIX sockets. -- sftp_write: polished and simplified +Daniel Stenberg (10 Feb 2012) +- [Alexander Lamaison brought this change] + + Simplified sftp_read. - Removed unnecessary struct fields and state changes within the function. + Removed the total_read variable that originally must have tracked how + much data had been written to the buffer. With non-blocking reads, we + must return straight away once we have read data into the buffer so this + variable served not purpose. - Made the loop that checks for ACKs only check chunks that were fully - sent. 
+ I think it was still hanging around in case the initial processing of + 'leftover' data meant we wrote to the buffer but this case, like the + others, must return immediately. Now that it does, the last remaining + need for the variable is gone. -- SCP: on failure, show the numerical error reason +- [Alexander Lamaison brought this change] + + Cleaned up sftp_read and added more explanation. - By calling libssh2_session_last_errno() + Replaced the gotos which were implementing the state machine with + a switch statement which makes the states more explicit. -- SFTP: provide the numerical error reason on failure +- sftp_read: avoid data *and* EAGAIN + + Whenever we have data and is about to call a function that *MAY* return + EAGAIN we must return the data now and wait to get called again. Our API + only allows data *or* EAGAIN and we must never try to get both. -- SCP: clean up failure treatment +Peter Stuge (2 Feb 2012) +- Add a tcpip-forward example which demonstrates remote port forwarding + +- libssh2.h: Add missing prototype for libssh2_session_banner_set() + +- example/subsystem_netconf.c: Return error when read buffer is too small - When SCP send or recv fails, it gets a special message from the server - with a warning or error message included. We have no current API to - expose that message but the foundation is there. Removed unnecessary use - of session struct fields. + Also remove a little redundancy in the read loop condition. -- sftp_write: enlarge buffer to perform better +- example/subsystem_netconf.c: Add a missing newline in an error message -- packets: code cleanup +- Fix undefined reference to _libssh_error in libgcrypt backend - I added size checks in several places. I fixed the code flow to be easier - to read in some places. + Commit 209de22299b4b58e582891dfba70f57e1e0492db introduced a function + call to a non-existing function, and since then the libgcrypt backend + has not been buildable. 
+ +Version 1.4.0 (31 Jan 2012) + +Daniel Stenberg (31 Jan 2012) +- RELEASE-NOTES: synced with 6bd584d29 for 1.4.0 + +- s/1.3.1/1.4.0 - I removed unnecessary zeroing of structs. I removed unused struct fields. + We're bumping the minor number -- LIBSSH2_CALLBACK_MACERROR: clarify return code use +- [Jernej Kovacic brought this change] -- _libssh2_userauth_publickey: avoid shadowing + libssh2_session_supported_algs: fix compiler warning -- packet: avoid shadowing global symbols +- [Jernej Kovacic brought this change] -- sftp_readdir: avoid shadowing + session_supported_algs docs: added an example -- shadowing: don't shadow the global compress +- [Gellule Xg brought this change] -- _libssh2_packet_add: turn ifs into a single switch + sftp-seek: clear EOF flag + + Set the EOF flag to False when calling seek64 to be able to get some + data back on a following read -- _libssh2_packet_add: check SSH_MSG_GLOBAL_REQUEST packet +- [Peter Krempa brought this change] -- _libssh2_packet_add: SSH_MSG_DEBUG length checks + userauth: Provide more informations if ssh pub key extraction fails - Verify lengths before using them. Read always_display from the correct - index. Don't copy stuff around just to provide zero-termination of the - strings. + If the function that extracts/computes the public key from a private key + fails the errors it reports were masked by the function calling it. This + patch modifies the key extraction function to return errors using + _libssh_error() function. The error messages are tweaked to contain + reference to the failed operaton in addition to the reason. + + * AUTHORS: - add my name + * libgcrypt.c: _libssh2_pub_priv_keyfile(): - return a more verbose + error using + _libssh2_error() func. 
+ * openssl.c: - modify call graph of _libssh2_pub_priv_keyfile() to use + _libssh2_error for error reporting(); + * userauth.c: - tweak functions calling _libssh2_pub_priv_keyfile() not + to shadow error messages -- _libssh2_packet_add: SSH_MSG_IGNORE skip memmove +- TODO: remove issues we (sort of) did already + +- ssh2_exec: skip error outputs for EAGAIN - There's no promise of a zero termination of the data in the callback so - no longer perform ugly operation in order to provide it. + Since the example uses non-blocking mode, it will just flood the output + with this "nonsense" error. -- _libssh2_packet_add: SSH_MSG_DISCONNECT length checks +Guenter Knauf (30 Nov 2011) +- Some NetWare makefile tweaks. + +Daniel Stenberg (18 Nov 2011) +- LIBSSH2_SFTP_PACKET_MAXLEN: increase to 80000 - Verify lengths before trying to read data. + Some SFTP servers send SFTP packets larger than 40000. Since the limit + is only present to avoid insane sizes anyway, we can easily bump it. + + The define was formerly in the public header libssh2_sftp.h but served + no external purpose and was moved into the source dir. + + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-11/0004.shtml + Reported by: Michael Harris -- indent: break lines at 80 columns +Alexander Lamaison (18 Nov 2011) +- [Peter Krempa brought this change] -- SSH_MSG_CHANNEL_OPEN_FAILURE: used defined values + knownhost_check(): Don't dereference ext if NULL is passed - We don't like magic numbers in the code. Now the acceptable failure - codes sent in the SSH_MSG_CHANNEL_OPEN_FAILURE message are added as - defined values in the private header file. + Documentation for libssh2_knownhost_checkp() and related functions + states that the last argument is filled with data if non-NULL. + + "knownhost if set to non-NULL, it must be a pointer to a 'struct + libssh2_knownhost' pointer that gets filled in to point to info about a + known host that matches or partially matches." 
+ + In this function ext is dereferenced even if set to NULL, causing + segfault in applications not needing the extra data. -- sftp_write: don't return EAGAIN if no EAGAIN was received +Daniel Stenberg (11 Nov 2011) +- [Peter Krempa brought this change] + + knownhost_add: Avoid dereferencing uninitialized memory on error path. - This function now only returns EAGAIN if a lower layer actually returned - EAGAIN to it. If nothing was acked and no EAGAIN was received, it will - now instead return 0. + In function knownhost_add, memory is alocated for a new entry. If normal + alocation is used, memory is not initialized to 0 right after, but a + check is done to verify if correct key type is passed. This test is done + BEFORE setting the memory to null, and on the error path function + free_host() is called, that tries to dereference unititialized memory, + resulting into a glibc abort(). + + * knownhost.c - knownhost_add(): - move typemask check before alloc -- _libssh2_wait_socket: detect nothing-to-wait-for +- windows build: add define to avoid compiler warning - If _libssh2_wait_socket() gets called but there's no direction set to - wait for, this causes a "hang". This code now detects this situation, - set a 1 second timeout instead and outputs a debug output about it. + A recent mingw compiler has started to complain on "#warning Please + include winsock2.h before windows.h" unless the magic define is set + first. + + Reported by: Vincent Torri + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-10/0064.shtml + +Henrik Nordstrom (31 Oct 2011) +- [Vincent Torri brought this change] -- decomp: remove the free_dest argument + Correct Windows include file name case, simplifying cross-compilation - Since the decompress function ALWAYS returns allocated memory we get a - lot simpler code by removing the ability to return data unallocated. 
- -- decomp: cleaned off old compression stuff + When cross compiling to Windows, libssh2.h include Windows header files + with upper case filenames : BaseTsd.h and WinSock2.h. - I cleared off legacy code from when the compression and decompression - functions were a single unified function. Makes the code easier to read - too. - -- [TJ Saunders brought this change] + These files have lowercase names with mingw-w64 (iirc, it's the same with + mingw). And as on Windows, being lowercase or uppercase does not matter. - decomp: increase decompression buffer sizes +Daniel Stenberg (25 Oct 2011) +- [Jernej Kovacic brought this change] -- [TJ Saunders brought this change] + libssh2_session_supported_algs: added - zlib: Add debug tracing of zlib errors +- [Kamil Dudka brought this change] -- sftp_packet_read: handle partial reads of the length field + example/sftp_RW_nonblock: do not ignore LIBSSH2_ERROR_EAGAIN - SFTP packets come as [32 bit length][payload] and the code didn't - previously handle that the initial 32 bit field was read only partially - when it was read. + Bug: https://bugzilla.redhat.com/745420 -- [Jasmeet Bagga brought this change] +Peter Stuge (5 Oct 2011) +- example/ssh2_agent: Print host key fingerprint before authentication + + Also moves the comment about not being authenticated to before the + agent authentication takes place, so that it better matches the code. - kex_agree_hostkey: fix NULL pointer derefence +Daniel Stenberg (29 Sep 2011) +- OpenSSL EVP: fix threaded use of structs - While setting up the session, ssh tries to determine the type of - encryption method it can use for the session. This requires looking at - the keys offered by the remote host and comparing these with the methods - supported by libssh2 (rsa & dss). To do this there is an iteration over - the array containing the methods supported by libssh2. 
+ Make sure we don't clear or reset static structs after first init so + that they work fine even when used from multiple threads. Init the + structs in the global init. - If there is no agreement on the type of encryption we come to the 3rd - entry of the hostkeyp array. Here hostkeyp is valid but *hostkep is - NULL. Thus when we dereference that in (*hostkeyp)->name there is a - crash - -- _libssh2_transport_send: remove dead assignment + Help and assistance by: John Engstrom - 'data' isn't accessed beyond this point so there's no need to assign it. + Fixes #229 (again) -- scp_recv: remove dead assignment +- openssl: don't init static structs differently - Instead of assigning a variable we won't read, we now use the more - explicit (void) prefix. + make_ctr_evp() is changed to take a struct pointer, and then each + _libssh2_EVP_aes_[keylen]_ctr function is made to pass in their own + static struct + + Reported by: John Engstrom + Fixes #229 -- sftp_write: removed superfluous assignment +Guenter Knauf (27 Sep 2011) +- Removed obsolete include path. -- bugfix: avoid use of uninitialized value +Daniel Stenberg (21 Sep 2011) +- read_state: clear the state variable better + + Set read_state back to idle before trying to send anything so that if + the state somehow is wrongly set. + + Also, avoid such a case of confusion by resetting the read_state when an + sftp handle is closed. -- sftp_packet_require: propagate error codes better +- sftp_read: remove leftover fprintf - There were some chances that they would cause -1 to get returned by - public functions and as we're hunting down all such occurances and since - the underlying functions do return valuable information the code now - passes back proper return codes better. 
+ Reported by: Alexander Lamaison -- [Alfred Gebert brought this change] +- sftp.h: fix the #ifdef to prevent multiple inclusions - fix memory leaks (two times cipher_data) for each sftp session +- sftp_read: use a state variable to avoid bad writes + + When a channel_write call has gotten an EAGAIN back, we try harder to + continue the same write in the subsequent invoke. -- libssh2_userauth_authenticated: make it work as documented +- window_size: explicit adjustments only - The man page clearly says it returns 1 for "already authenticated" but - the code said non-zero. I changed the code to use 1 now, as that is also - non-zero but it gets the benefit that it now matches the documentation. + Removed the automatic window_size adjustments from + _libssh2_channel_read() and instead all channel readers must now make + sure to enlarge the window sizes properly themselves. - Using 1 instead of non-zero is better for two reasons: + libssh2_channel_read_ex() - the public function, now grows the window + size according to the requested buffer size. Applications can still opt + to grow the window more on demand. Larger windows tend to give higher + performance. - 1. We have the opportunity to introduce other return codes in the future for - things like error and what not. - 2. We don't expose the internal bitmask variable value. + sftp_read() now uses the read-ahead logic to figure out a window_size. 
-- userauth_keyboard_interactive: fix indent +- libssh2.h: bump the default window size to 256K -- [Alfred Gebert brought this change] +- libssh2_userauth_keyboard_interactive.3: fix man warning + + It seemed to occur due to the excessive line length - fix memory leak in userauth_keyboard_interactive() +- [Mikhail Gusarov brought this change] + + Add missing .gitignore entries + +- [Mikhail Gusarov brought this change] + + Add manpage syntax checker to 'check' target - First I wanted to free the memory in session_free() but then - I had still memory leaks because in my test case the function - userauth_keyboard_interactive() is called twice. It is called - twice perhaps because the server has this authentication - methods available: publickey,gssapi-with-mic,keyboard-interactive - The keyboard-interactive method is successful. + In virtually every libssh2 release Debian's lintian catches syntax errors in + manpages. Prevent it by checking manpages as a part of testsuite. -- dist: include sftp.h in dist archives +- libssh2_banner_set.3: fix nroff syntax mistake -Simon Josefsson (27 Oct 2010) -- Update header to match new function prototype, see c48840ba88. +Guenter Knauf (10 Sep 2011) +- Use predefined resource compiler macro. -Daniel Stenberg (26 Oct 2010) -- bugfixes: the transport rearrange left some subtle flaws now gone +- Added casts to silent compiler warnings. -- libssh2_userauth_publickey_fromfile_ex.3: cleaned up looks +- Fixed uint64_t printf. -- libssh2_userauth_publickey: add man page - - I found an undocumented public function and we can't have it like - that. The description here is incomplete, but should serve as a template - to allow filling in... +- Fixed macro function signatures. -- libssh2_sftp_write.3: added blurb about the "write ahead" - - Documented the new SFTP write concept +- NetWare makefile tweaks. -- sftp_close_handle: free any trailing write chunks +- Removed unused var. 
-- _libssh2_channel_write: fix warnings +- Added 2 samples not mentioned. -- SFTP: bufgix, move more sftp stuff to sftp.h - - The sftp_write function shouldn't assume that the buffer pointer will be - the same in subsequent calls, even if it assumes that the data already - passed in before haven't changed. - - The sftp structs are now moved to sftp.h (which I forgot to add before) +- Dont build x11 sample with MinGW. -- SFTP: use multiple outgoing packets when writing - - sftp_write was rewritten to split up outgoing data into multiple packets - and deal with the acks in a more asynchronous manner. This is meant to - help overcome latency and round-trip problems with the SFTP protocol. +- Fixed executable file description. -- TODO: implemented a lot of the ideas now +- Removed unused var. -- _libssh2_channel_write: removed 32500 size limit +- Kill stupid gcc 3.x uninitialized warning. + +- Build all examples. + +- More MinGW makefile tweaks. - Neither _libssh2_channel_write nor sftp_write now have the 32500 size - limit anymore and instead the channel writing function now has its own - logic to send data in multiple calls until everything is sent. + Renamed *.mingw makefiles to GNUmakefile since GNU make picks these + up automatically, and therefore win32/Makefile removed. -- send_existing: don't tell parent to return when drained +- Removed forgotten WINSOCK_VERSION defines. + +Daniel Stenberg (9 Sep 2011) +- libssh2_session_startup(3) => libssh2_session_handshake(3) - That will just cause unnecessary code execution. + Propagate for the current function in docs and examples. + libssh2_session_startup() is deprecated. -- _libssh2_channel_write: general code cleanup +- libssh2_banner_set => libssh2_session_banner_get - simplified the function and removed some unused struct fields + Marked the old function as deprecated. Added the new name in the correct + name space with the same arguments and functionality. 
-- _libssh2_transport_send: replaces _libssh2_transport_write +- new function: libssh2_session_banner_get - The new function takes two data areas, combines them and sends them as a - single SSH packet. This allows several functions to allocate and copy - less data. + Returns the banner from the server handshake - I also found and fixed a mixed up use of the compression function - arguments that I introduced in my rewrite in a recent commit. + Fixes #226 -- scp_write_nonblock: use select() instead of busyloop - - Make this example nicer by not busylooping. +- libssh2.h: bump version to 1.4.0 for new function(s) -- send_existing: clear olen when the data is sent off +- remove embedded CVS/svn tags -- _libssh2_transport_write: allow 256 extra bytes around the packet +- [liuzl brought this change] -- _libssh2_transport_write: remade to send without malloc + API add:libssh2_sftp_get_channel + + Return the channel of sftp, then caller can + control the channel's behavior. + + Signed-off-by: liuzl -- compress: compression disabled by default +- _libssh2_channel_read: react on errors from receive_window_adjust - We now allow libssh2_session_flag() to enable compression with a new - flag and I added documentation for the previous LIBSSH2_FLAG_SIGPIPE - flag which I wasn't really aware of! + Previously the function would ignore all errors except for EAGAIN. -- comp: split the compress function +- sftp_read: extend and clarify the documentation + +- sftp_read: cap the read ahead maximum amount - It is now made into two separate compress and decompress functions. In - preparation for upcoming further modficications. + Now we only go up to LIBSSH2_CHANNEL_WINDOW_DEFAULT*30 bytes SFTP read + ahead, which currently equals 64K*30 == 1966080 bytes. 
-Dan Fandrich (20 Oct 2010) -- Added header file to allow compiling in older environments +- _libssh2_channel_read: fix non-blocking window adjusting + + If EAGAIN is returned when adjusting the receive window, we must not + read from the transport directly until we've finished the adjusting. -Daniel Stenberg (20 Oct 2010) -- TODO: add a possible new API for SFTP transfers +Guenter Knauf (8 Sep 2011) +- Fix for systems which need sys/select.h. -- TODO: "New Transport API" added +- The files were not gone but renamed ... -- TODO: add buffering plans +Daniel Stenberg (6 Sep 2011) +- sftp_read: added documenting comment + + Taken from some recent email conversations I added some descriptions of + the logic in sftp_read() to aid readers. -Simon Josefsson (13 Oct 2010) -- Mention libssh2_channel_get_exit_signal and give kudos. +- 1.3.1: start the work + +Version 1.3.0 (6 Sep 2011) -- [Tommy Lindgren brought this change] +Daniel Stenberg (6 Sep 2011) +- Makefile.am: the Makefile.win32 files are gone - Add libssh2_channel_get_exit_signal man page. +- RELEASE-NOTES: updated for 1.3.0 + +- sftp_read: a short read is not end of file - Signed-off-by: Simon Josefsson + A returned READ packet that is short will now only reduce the + offset. + + This is a temporary fix as it is slightly better than the previous + approach but still not very good. -- [Tommy Lindgren brought this change] +- [liuzl brought this change] - Add libssh2_channel_get_exit_signal. + _libssh2_packet_add: adjust window size when truncating - Signed-off-by: Simon Josefsson + When receiving more data than what the window size allows on a + particular channel, make sure that the window size is adjusted in that + case too. Previously it would only adjust the window in the non-error + case. -- Add libssh2_free man page and fix typo. +Guenter Knauf (29 Aug 2011) +- Silent compiler warning with MinGW64. -- Add libssh2_free. +- Fixed link to native Win32 awk tool. 
-Daniel Stenberg (11 Oct 2010) -- scp_recv: improved treatment of channel_read() returning zero +- Renamed MinGW makefiles. + +- Some MinGW makefile tweaks. - As a zero return code from channel_read() is not an error we must make - sure that the SCP functions deal with that properly. channel_read() - always returns 0 if the channel is EOFed already so we check for EOF - after 0-reads to be able to return error properly. + Enable build without GNU tools and with MinGW64 compiler. -- libssh2_session_methods.3: detail what can be asked for +- Fixed aes_ctr_do_cipher() signature. -- compression: send zlib before none - - As the list of algorithms in a preferred order we should send zlib - before none to increase the chances that the server will let us do - compression. +Daniel Stenberg (26 Aug 2011) +- [liuzl brought this change] -- compress: faster check, better return codes - - In the transport functions we avoid a strcmp() now and just check a - boolean instead. + libssh2_sftp_seek64: flush packetlist and buffered data - The compress/decompress function's return code is now acknowledged and - used as actual return code in case of failures. + When seeking to a new position, flush the packetlist and buffered data + to prevent already received or pending data to wrongly get used when + sftp-reading from the new offset within the file. -- libssh2_session_handshake: replaces libssh2_session_startup() +- sftp_read: advance offset correctly for buffered copies - The function libssh2_session_startup() is now considered deprecated due - to the portability issue with the socket argument. - libssh2_session_handshake() is the name of the replacement. - -- libssh2_socket_t: now externally visible + In the case where a read packet has been received from the server, but + the entire contents couldn't be copied to the user-buffer, the data is + instead buffered and copied to the user's buffer in the next invocation + of sftp_read(). 
When that "extra" copy is made, the 'offset' pointer was + not advanced accordingly. - In preparation for upcominig changes, the libssh2_socket_t type is now - typedef'ed in the public header. - -- _libssh2_transport_drain: removed + The biggest impact of this flaw was that the 'already' variable at the + top of the function that figures out how much data "ahead" that has + already been asked for would slowly go more and more out of sync, which + could lead to the file not being read all the way to the end. - This function proved not to be used nor useful. - -- _libssh2_channel_write: don't iterate over transport writes + This problem was most noticable in cases where the application would + only try to read the exact file size amount, like curl does. In the + examples libssh2 provides the sftp read function is most often called + with a fixed size large buffer and then the bug would not appear as + easily. - When a call to _libssh2_transport_write() succeeds, we must return from - _libssh2_channel_write() to allow the caller to provide the next chunk - of data. + This bug was introduced in the SFTP rewrite in 1.2.8. - We cannot move on to send the next piece of data that may already have - been provided in this same function call, as we risk getting EAGAIN for - that and we can't return information both about sent data as well as - EAGAIN. So, by returning short now, the caller will call this function - again with new data to send. + Bug: http://curl.haxx.se/mail/lib-2011-08/0305.html + http://www.libssh2.org/mail/libssh2-devel-archive-2011-08/0085.shtml -- _libssh2_transport_write: updated documentation blurb +- wrap some long lines < 80 columns -- _libssh2_transport_write: remove fprintf remainder - - Mistake from previous debugging +- LIBSSH2_RECV: fix typo, use the RECV_FD macro -- session: improved errors - - Replaced -1/SOCKET_NONE errors with appropriate error defines instead. 
- - Made the verbose trace output during banner receiving less annoying for - non-blocking sessions. +- subsystem_netconf.c: fix compiler warnings -- crypt_init: use correct error define +- [Henrik Nordstrom brought this change] -- _libssh2_error: hide EAGAIN for non-blocking sessions - - In an attempt to make the trace output less cluttered for non-blocking - sessions the error function now avoids calling the debug function if the - error is the EAGAIN and the session is non-blocking. + Custom callbacks for performing low level socket I/O -- agent: use better error defines +- version bump: start working towards 1.3.0 -- comp_method_zlib_init: use correct error defines +Version 1.2.9 (16 Aug 2011) -- transport: better error codes - - LIBSSH2_SOCKET_NONE (-1) should no longer be used as error code as it is - (too) generic and we should instead use specific and dedicated error - codes to better describe the error. +Daniel Stenberg (16 Aug 2011) +- RELEASE-NOTES: synced with 95d69d3a81261 -- channel: return code and _libssh2_error cleanup - - Made sure that all transport_write() call failures get _libssh2_error - called. +- [Henrik Nordstrom brought this change] -- _libssh2_channel_write: limit to 32700 bytes + Document prototypes for macro defined functions + +- [Henrik Nordstrom brought this change] + + Avoid reuse after free when closing X11 channels + +- _libssh2_channel_write: handle window_size == 0 better - The well known and used ssh server Dropbear has a maximum SSH packet - length at 32768 by default. Since the libssh2 design current have a - fixed one-to-one mapping from channel_write() to the packet size created - by transport_write() the previous limit of 32768 in the channel layer - caused the transport layer to create larger packets than 32768 at times - which Dropbear rejected forcibly (by closing the connection). 
+ When about to send data on the channel and the window size is 0, we must + not just return 0 if the transport_read() function returned EAGAIN as it + then causes a busy-loop. - The long term fix is of course to remove the hard relation between the - outgoing SSH packet size and what the input length argument is in the - transport_write() function call. - -- libssh.h: add more dedicated error codes + Bug: http://libssh2.org/mail/libssh2-devel-archive-2011-08/0011.shtml -- SCP: allow file names with bytes > 126 +- gettimeofday: fix name space pollution - When parsing the SCP protocol and verifying that the data looks like a - valid file name, byte values over 126 must not be consider illegal since - UTF-8 file names will use such codes. + For systems without its own gettimeofday() implementation, we still must + not provide one outside our namespace. - Reported by: Uli Zappe - Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2010-08/0112.shtml + Reported by: Bill Segall -Dan Fandrich (25 Aug 2010) -- Document the three sftp stat constants +Dan Fandrich (5 Aug 2011) +- libssh2.pc.in: Fixed spelling in pkgconfig file -Guenter Knauf (18 Aug 2010) -- Fixed Win32 makefile which was now broken at resource build. +Peter Stuge (17 Jul 2011) +- example/subsystem_netconf.c: Add missing #include -- It is sufficient to pipe stderr to NUL to get rid of the nasty messages. +- example/subsystem_netconf.c: Discard ]]>]]> and return only XML response -- [Author: Guenter Knauf brought this change] +- example/subsystem_netconf.c: Fix uninitialized variable bug - Removed Win32 ifdef completely for sys/uio.h. +- example: Add subsystem_netconf.c - No idea why we had this ifdef at all but MSVC, MingW32, Watcom - and Borland all have no sys/uio.h header; so if there's another - Win32 compiler which needs it then it should be added explicitely - instead of this negative list. - -- New files should also be added to Makefile.am. 
+ This example demonstrates how to use libssh2 to send a request to + the NETCONF subsystem available e.g. in JunOS. - Otherwise they will never be included with release and snapshot tarballs ... - -Daniel Stenberg (18 Aug 2010) -- version: bump to 1.2.8_DEV + See also http://tools.ietf.org/html/draft-ietf-netconf-ssh-06 -Version 1.2.7 (17 Aug 2010) +Daniel Stenberg (16 Jul 2011) +- man page cleanups: non-existing functions need no man pages -Daniel Stenberg (17 Aug 2010) -- release: updated to hold 1.2.7 info +- libssh2_new_host_entry.3: removed + + This is just junk leftovers. -Guenter Knauf (17 Aug 2010) -- Use the new libssh2.rc file. +- userauth_keyboard_interactive: fix buffer overflow + + Partly reverse 566894494b4972ae12 which was simplifying the code far too + much and ended up overflowing a buffer within the LIBSSH2_SESSION + struct. Back to allocating the buffer properly like it used to do. + + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-06/0032.shtml + Reported by: Alfred Gebert -- Added resource file for libssh2.dll (shamelessly stolen from libcurl). +- keyboard-interactive man page: cleaned up -- Updated Win32 MSVC dependencies versions. +- [Alfred Gebert brought this change] -- Added include for sys/select.h to get fd.set on some platforms. + _libssh2_recv(): handle ENOENT error as EAGAIN + + A sftp session failed with error "failure establishing ssh session" on + Solaris and HP-UX. Sometimes the first recv() function call sets errno + to ENOENT. In the man pages for recv of Solaris and HP-UX the error + ENOENT is not documented. + + I tested Solaris SPARC and x86, HP-UX i64, AIX, Windows and Linux. -- Added Watcom makefile borrowed from libcurl. +- agent_list_identities: fix out of scope access - This makefile compiles already all files fine for static lib, but needs - final touch when I have OpenSSL fully working with shared libs and Watcom. + An auto variable out of scope was being referenced and used. 
+ + fixes #220 -- Added copyright define to libssh2.h and use it for binary builds. +- _libssh2_wait_socket: fix timeouts for poll() uses -- Moved version defines up in order to include from .rc file. +- windows: inclusion fix - Blocked rest of header with ifndef so its possible to let - the rc compiler only use the version defines. - -- Some minor makefile tweaks. + include winsock2.h for all windows compilers -Daniel Stenberg (2 Aug 2010) -- example: treat the libssh2_channel_read() return code properly +- keyb-interactive: add the fixed buffer - A short read is not an error. Only negative values are errors! + Belongs to commit 5668944 -- libssh2_wait_socket: reset error code to "leak" EAGAIN less +- code cleanup: don't use C99/c++ comments - Since libssh2 often sets LIBSSH2_ERROR_EAGAIN internally before - _libssh2_wait_socket is called, we can decrease some amount of - confusion in user programs by resetting the error code in this function - to reduce the risk of EAGAIN being stored as error when a blocking - function returns. + We aim for C89 compliance -- _libssh2_wait_socket: poll needs milliseconds +- keyb-interactive: allow zero length fields + + Allow zero length fields so they don't cause malloc(0) calls - As reported on the mailing list, the code path using poll() should - multiple seconds with 1000 to get milliseconds, not divide! + Avoid free()ing NULL pointers - Reported by: Jan Van Boghout + Avoid a malloc of a fixed 5 byte buffer. -- typedef: make ssize_t get typedef without LIBSSH2_WIN32 +- libssh2_channel_process_startup.3: clean up - The condition around the ssize_t typedef depended on both LIBSSH2_WIN32 - *and* _MSC_VER being defined when it should be enough to depend on - _MSC_VER only. It also makes it nicer so libssh2-using code builds fine - without having custom defines. 
- -- [John Little brought this change] - - session_free: free more data to avoid memory leaks - -- channel_free: ignore problems with channel_close() + Remove the references to the macro-fied shortcuts as they have their own + individual man pages. - As was pointed out in bug #182, we must not return failure from - _libssh2_channel_free() when _libssh2_channel_close() returns an error - that isn't EAGAIN. It can effectively cause the function to never go - through, like it did now in the case where the socket was actually - closed but socket_state still said LIBSSH2_SOCKET_CONNECTED. + Made the prototype different and more readable. + +- man page: fix .BR lines - I consider this fix the right thing as it now also survives other - errors, even if making sure socket_state isn't lying is also a good - idea. + We don't use \fI etc on .BR lines -- publickey_list_free: no return value from a void function +- userauth_keyboard_interactive: skip code on zero length auth + +- libssh2_channel_forward_accept.3: mention how to get error - Fixed a compiler warning I introduced previously when checking input - arguments more. I also added a check for the other pointer to avoid NULL - pointer dereferences. + Since this returns a pointer, libssh2_session_last_errno() must be used + to get the actual error code and it wasn't that clear before. -- [Lars Nordin brought this change] +- timeout docs: mention they're added in 1.2.9 - openssl: make use of the EVP interface +- sftp_write_sliding.c: indent fix - Make use of the EVP interface for the AES-funktion. 
Using this method - supports the use of different ENGINES in OpenSSL for the AES function - (and the direct call to the AES_encrypt should not be used according to - openssl.org) + Use the standard indenting and removed CVS leftover comment -Peter Stuge (23 Jun 2010) -- [Tor Arntsen brought this change] +- [zl liu brought this change] - Don't overflow MD5 server hostkey + sftp_write_sliding: send the complete file - Use SHA_DIGEST_LENGTH and MD5_DIGEST_LENGTH in memcpy instead of hardcoded - values. An incorrect value was used for MD5. + When reaching the end of file there can still be data left not sent. -- Fix message length bugs in libssh2_debug() +- [Douglas Masterson brought this change] + + session_startup: init state properly - There was a buffer overflow waiting to happen when a debug message was - longer than 1536 bytes. + libssh2_session_startup() didn't set the state correctly so it could get + confused. - Thanks to Daniel who spotted that there was a problem with the message - length passed to a trace handler also after commit - 0f0652a3093111fc7dac0205fdcf8d02bf16e89f. + Fixes #218 -- Make libssh2_debug() create a correctly terminated string +- timeout: added man pages + +- BLOCK_ADJUST_ERRNO: move rc to right level - Also use FILE *stderr rather than fd 2, which can very well be something - completely different. + We can't declare the variable within the block and use it in the final + do-while() expression to be properly portable C89. -Daniel Stenberg (23 Jun 2010) -- [TJ Saunders brought this change] +- [Matt Lilley brought this change] - handshake: Compression enabled at the wrong time - - In KEXINIT messages, the client and server agree on, among other - things, whether to use compression. This method agreement occurs - in src/kex.c's kex_agree_methods() function. 
However, if - compression is enabled (either client->server, server->client, or - both), then the compression layer is initialized in - kex_agree_methods() -- before NEWKEYS has been received. - - Instead, the initialization of the compression layer should - happen after NEWKEYS has been received. This looks to occur - insrc/kex.c's diffie_hellman_sha1(), which even has the comment: + adds a timeout to blocking calls - /* The first key exchange has been performed, + Fixes bug #160 as per Daniel's suggestion - switch to active crypt/comp/mac mode */ + Adds libssh2_session_set_timeout() and libssh2_session_get_timeout() + +- SCP: fix incorrect error code - There, after NEWKEYS is received, the cipher and mac algorithms - are initialized, and that is where the compression should be - initialized as well. + After an error occurs in libssh2_scp_recv() or libssh2_scp_send(), the + function libssh2_session_last_error() would return + LIBSSH2_ERROR_SOCKET_NONE on error. - The current implementation fails if server->client compression is - enabled because most server implementations follow OpenSSH's - lead, where compression is initialized after NEWKEYS. Since the - server initializes compression after NEWKEYS, but libssh2 - initializes compression after KEXINIT (i.e. before NEWKEYS), they - are out of sync. + Bug: http://trac.libssh2.org/ticket/216 + Patch by: "littlesavage" - Reported in bug report #180 + Fixes #216 -- [TJ Saunders brought this change] +Guenter Knauf (19 Apr 2011) +- Updated default (recommended) dependency versions. - userauth_hostbased_fromfile: packet length too short - - The packet length calculated in src/userauth.c's - userauth_hostbased_fromfile() function is too short by 4 bytes; - it forgets to add four bytes for the length of the hostname. - This causes hostbased authentication to fail, since the server - will read junk data. 
+Daniel Stenberg (17 Apr 2011) +- libssh2_session_block_directions: fix mistake - verified against proftpd's mod_sftp module - -- _libssh2_userauth_publickey: reject method names longer than the data + The last LIBSSH2_SESSION_BLOCK_INBOUND should be + LIBSSH2_SESSION_BLOCK_OUTBOUND - This functions get the method length by looking at the first 32 - bit of data, and I now made it not accept method lengths that are - longer than the whole data set is, as given in the dedicated - function argument. + And I shortened the short description - This was detected when the function was given bogus public key - data as an ascii string, which caused the first 32bits to create - a HUGE number. + Reported by: "drswinghead" -- NULL resistance: make more public functions survive NULL pointer input +- msvcproj: added libs and debug stuff - Sending in NULL as the primary pointer is now dealt with by more - public functions. I also narrowed the userauth.c code somewhat to - stay within 80 columns better. - -- agent: make libssh2_agent_userauth() work blocking properly + Added libraries needed to link whether using openssl dynamically or + statically - previously it would always work in a non-blocking manner - -Peter Stuge (17 Jun 2010) -- Fix underscore typo for 64-bit printf format specifiers on Windows + Added LIBSSH2DEBUG define to debug versions to enable tracing - Commit 49ddf447ff4bd80285f926eac0115f4e595f9425 was missing underscores. + URL: http://trac.libssh2.org/ticket/215 + Patch by: Mark Smith -Daniel Stenberg (16 Jun 2010) -- libssh2_session_callback_set: extended the man page +- sftp_write: clean offsets on error + + When an error has occurred on FXP_WRITE, we must make sure that the + offset, sent offset and acked counter are reset properly. 
-- [John brought this change] +- example/.gitignore: ignore built binaries - LIBSSH2_DEBUG: macro uses incorrect function variable - - The LIBSSH2_DEBUG macro, defined in libssh2_priv.h, incorrectly uses the - function variable ssh_msg_disconnect when it should use ssh_msg_debug. +- sftp_write: flush the packetlist on error - This shows that the LIBSSH2_CALLBACK_DEBUG callback never has worked... + When an error occurs during write, flush the entire list of pending + outgoing SFTP packets. -- warning: fix a compiler warning 'pointer differs in signedness' +- keepalive: add first basic man pages - As reported in bug #177 + Someone on IRC pointed out that we don't have these documented so I + wrote up a first set based on the information in the wiki: + http://trac.libssh2.org/wiki/KeepAlive -- portability: introduce LIBSSH2_INT64_T_FORMAT for 64bit printf()s +- scp_write_nonblock.c: remove pointless check - As pointed out in bug #177, some of the Windows compilers use - %I64 to output 64 bit variables with the printf family. + libssh2_channel_write() cannot return a value that is larger than the + input length value -- debug: avoid sending NULL to sprintf %s - - Via the _libssh2_debug() macro/function. Pointed out by john in bug report +Mikhail Gusarov (9 Apr 2011) +- s/\.NF/.nf/ to fix wrong macro name caught by man --warnings -- sftp docs: show macro on macro page, only function on function page +Daniel Stenberg (6 Apr 2011) +- version: bump to 1.2.9_dev - The individual man pages for macros now show the full convenience - macro as defined, and then the man page for the actual function - only shows the function. 
- -- code police: make the code use less than 80 columns + Also update the copyright year range to include 2011 -- libssh2_channel_write_ex: remove macros, added wording on buffer size +- configure: fix $VERSION + + Stop using the $VERSION variable as it seems to be magically used by + autoconfig itself and thus gets set to the value set in AC_INIT() + without us wanting that. $LIBSSH2VER is now the libssh2 version as + detected. + + Reported by: Paul Howarth + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2011-04/0008.shtml -- libssh2_sftp_write: document buffer size and changed some ordering +- maketgz: use git2news.pl by the correct name -- libssh2_channel_write_stderr: show how the macro is defined +Version 1.2.8 (4 Apr 2011) -- libssh2_channel_write: show how the macro is defined +Daniel Stenberg (4 Apr 2011) +- RELEASE-NOTES: synced with fabf1a45ee -- SFTP: limit write() to not produce overly large packets - - sftp_write() now limits how much data it gets at a time even more - than before. Since this function creates a complete outgoing - packet based on what gets passed to it, it is crucial that it - doesn't create too large packets. +- NEWS: auto-generated from git - With this method, there's also no longer any problem to use very - large buffers in your application and feed that to libssh2. I've - done numerous tests now with uploading data over SFTP using 100K - buffers and I've had no problems with that. + Starting now, the NEWS file is generated from git using the git2news.pl + script. This makes it always accurate and up-to-date, even for daily + snapshots etc. -- scp_write_nonblock: add transfer time info +- sftp_write: handle FXP_WRITE errors - Using the same timing logic and output format as - sftp_write_nonblock allows us to very easily run benchmarks on - SCP vs SFTP uploads using libssh2. + When an sftp server returns an error back on write, make sure the + function bails out and returns the proper error. 
-- sftp_write_nonblock: select() on socket, use *BIG* buffer, time transfer +- configure: stop using the deprecated AM_INIT_AUTOMAKE syntax + +Alexander Lamaison (13 Mar 2011) +- Support unlimited number of host names in a single line of the known_hosts file. - The select() is just to make it nicer so that it doesn't - crazy-loop on EAGAIN. The buffer size thing is mostly to verify - that this really work as supposed. + Previously the code assumed either a single host name or a hostname,ip-address pair. However, according to the spec [1], there can be any number of comma separated host names or IP addresses. - Transfer timing is just a minor thing, but it can just as well be - there and help us time and work on performance easier using out - of the box examples. + [1] http://www.openbsd.org/cgi-bin/man.cgi?query=sshd&sektion=8 -- agent: use _libssh2_error() when returning errors +Daniel Stenberg (26 Feb 2011) +- libssh2_knownhost_readfile.3: clarify return value - As pointed out in bug report #173, this module basically never - used _libssh2_error() which made it work inconstently with other - parts of the libssh2 code base. This is my first take at making - this code more in line with the rest. + This function returns the number of parsed hosts on success, not just + zero as previously documented. -- inputchecks: make lots of API functions check for NULL pointers +Peter Stuge (26 Feb 2011) +- Don't save allocated packet size until it has actually been allocated - If an application accidentally provides a NULL handle pointer to - the channel or sftp public functions, they now return an error - instead of segfaulting. + The allocated packet size is internal state which needs to match reality + in order to avoid problems. This commit fixes #211. 
-- libssh2_channel_eof: clarify that it returns negative on errors +Daniel Stenberg (21 Feb 2011) +- [Alfred Gebert brought this change] -- SFTP: keep the sftp error code as 32 bit + session_startup: manage server data before server identification - 'last_errno' holds to the error code from the SFTP protocol and - since that is 32 bits on the wire there's no point in using a - long for this internally which is larger on some platforms. - -- agent: make the code better deal with unexpected code flows + Fix the bug that libssh2 could not connect if the sftp server + sends data before sending the version string. - agent->ops gets initialized by the libssh2_agent_connect() call - but we need to make sure that we don't segfault even if a bad - sequence of function calls is used. - -Alexander Lamaison (10 Jun 2010) -- Better handling of invalid key files. + http://tools.ietf.org/html/rfc4253#section-4.2 - Passing an invalid public key to libssh2_userauth_publickey_fromfile_ex - triggered an assertion. Replaced this with a runtime check that rejects - obviously invalid key data. - -Daniel Stenberg (10 Jun 2010) -- version: we start working on 1.2.7 now - -Version 1.2.6 (10 Jun 2010) + "The server MAY send other lines of data before sending the version + string. Each line SHOULD be terminated by a Carriage Return and Line + Feed. Such lines MUST NOT begin with "SSH-", and SHOULD be encoded + in ISO-10646 UTF-8 [RFC3629] (language is not specified). Clients + MUST be able to process such lines." -Daniel Stenberg (10 Jun 2010) -- NEWS: add the 1.2.6 release details +- [Alfred Gebert brought this change] -- RELEASE-NOTES: 1.2.6 details added + fullpacket: decompression only after init + + The buffer for the decompression (remote.comp_abstract) is initialised + in time when it is needed. With this fix decompression is disabled when + the buffer (remote.comp_abstract) is not initialised. 
+ + Bug: http://trac.libssh2.org/ticket/200 -Guenter Knauf (10 Jun 2010) -- fixed libssh2.dsw to use the generated libssh2.dsp; removed old *.dsp files. +- _libssh2_channel_read: store last error + + When the transport layer returns EAGAIN this function didn't call + _libssh2_error() which made the last_error not get set. -- moved MSVC strdup define to libssh2_config.h which we include already. +- sftp_write: clarified the comment header -- added missing source files to src/NMakefile. +- sftp_read: avoid wrapping counter to insanity + + As pointed out in bug #206, if a second invoke of libssh2_sftp_read() + would shrink the buffer size, libssh2 would go nuts and send out read + requests like crazy. This was due to an unsigned variable turning + "negative" by some wrong math, and that value would be the amount of + data attempt to pre-buffer! + + Bug: http://trac.libssh2.org/ticket/206 -Daniel Stenberg (8 Jun 2010) -- libssh2_poll: refer to poll(3) and select(3) instead +- sftp_packet_read: use 32bit variables for 32bit data -- example: fix strdup() for MSVC compiles +- libssh2_sftp_stat_ex.3: cleaned up, extended + + Removed the macros from it as they have their own man pages. - MSVC has a _strdup() that we better use. This was reported in bug + Added the LIBSSH2_SFTP_ATTRIBUTES struct in here for easier reference. -- SFTP: fail init SFTP if session isn't authenticated +- sftp_readdir: return error if buffer is too small - Alexander Lamaison filed bug #172 - (http://trac.libssh2.org/ticket/172), and pointed out that SFTP - init would do bad if the session isn't yet authenticated at the - time of the call, so we now check for this situation and returns - an error if detected. Calling sftp_init() at this point is bad - usage to start with. + If asked to read data into a buffer and the buffer is too small to hold + the data, this function now returns an error instead of as previously + just copy as much as fits. 
-- direct_tcpip: bring back inclusion of libssh2_config.h +- sftp_symlink: return error if receive buffer too small - In order to increase portability of this example, I'm bringing - the inclusion of libssh2_config.h back, and I also added an - require that header for this example to compile. + and clean up some variable type mismatches - I also made all code lines fit within 80 columns. - -Guenter Knauf (3 Jun 2010) -- cast away a warning. - -- moved CRT_SECURE_NO_DEPRECATE define up so its defined before the winsock headers are included. - -- fixed platform detection for MingW32 test makefile. - -- MingW32 has gettimeofday() implemented, so proper ifdef this function here. - -- removed MSVC ifdef since seems we can use __int64 still with latest headers. - -- changed copyright notice for MinW32 and NetWare binaries. - -- cleaned up MSVC ifdefs which where spreaded over 3 places. - -- added uint8_t typedef for NetWare CLIB platform. - -- if the function declaration gets changed the header should be changed too. - -- this is MSVC specific and doesnt apply for all Win32 compilers; - the uint8_t typedef clashes with MingW32 headers. - -- updated MingW32 makefiles for latest dependency lib versions. - -- updated NetWare makefiles for latest dependency lib versions. + Discussion: http://www.libssh2.org/mail/libssh2-devel-archive-2011-01/0001.shtml -Dan Fandrich (30 May 2010) -- Fixed compiling with libgcrypt +- docs: clarify what happens with a too small buffer - A change of parameter types from unsigned long to size_t was - missed in the prototype in libgcrypt.h + This flaw is subject to change, but I figured it might be valuable to + users of existing code to know how it works. 
-Daniel Stenberg (28 May 2010) -- statvfs: use libssh2_sftp_statvfs only, no "_ex" +- channel_request_pty_size: fix reqPTY_state - As the long-term goal is to get rid of the extensive set of - macros from the API we can just as well start small by not adding - new macros when we add new functions. Therefore we let the - function be libssh2_sftp_statvfs() plainly without using an _ex - suffix. + The state variable isn't properly set so every other call to the + function fails! - I also made it use size_t instead of unsigned int for the string - length as that too is a long-term goal for the API. - -- [Grubsky Grigory brought this change] - - DSP: output lib name typo - -- [Grubsky Grigory brought this change] - - win32: provide a uint8_t typedef for better building on windows + Bug: http://libssh2.org/mail/libssh2-devel-archive-2010-12/0096.shtml + Reported by: Steve Legg -- agent: win32: fix bad _libssh2_store_str call +- data size: cleanup - As pointed out by Grubsky Grigory , I - made a mistake when I added the _libssh2_store_str() call before - and I made a slightly different patch than what he suggested. - Based purely on taste. + Fix 64bit warnings by using (s)size_t and dedicated uint32_t types more. -Peter Stuge (24 May 2010) -- [Joey Degges brought this change] +- [Pierre Joye brought this change] - Add libssh2_sftp_statvfs() and libssh2_sftp_fstatvfs() + ssize_t: proper typedef with MSVC compilers - These can be used to get file system statistics from servers that - support the statvfs@openssh.com and fstatvfs@openssh.com extensions. - -Alexander Lamaison (22 May 2010) -- [Jose Baars brought this change] - - VMS specific: make sure final release can be installed over daily build + As discussed on the mailing list, it was wrong for win64 and using the + VC-provided type is the safest approach instead of second- guessing + which one it should be. -- [Jose Baars brought this change] +Guenter Knauf (22 Dec 2010) +- Updated OpenSSL version. 
- VMS: small improvement to the man2help utilities +- Expanded tabs to spaces. -Peter Stuge (22 May 2010) +Peter Stuge (21 Dec 2010) - [Joey Degges brought this change] - libssh2_exit and libssh2_sftp_readdir man page fixes - -Daniel Stenberg (21 May 2010) -- spelling: s/sue/use - -Alexander Lamaison (21 May 2010) -- Change magic port number for generic knownhost check. + _libssh2_ntohu64: fix conversion from network bytes to uint64 - libssh2_knownhost_checkp took 0 as a magic port number that indicated - a 'generic' check should be performed. However, 0 is a valid port - number in its own right so this commit changes the magic value to any - negative int. - -Mikhail Gusarov (5 May 2010) -- Add re-discovered copyright holders to COPYING + Cast individual bytes to uint64 to avoid overflow in arithmetic. -- Restoring copyright statements from pre-git era +Daniel Stenberg (20 Dec 2010) +- libssh2_userauth_list: language fix - Eli Fant has contributed fragmenting SFTP requests + "faily" is not a good English word, and I also cleaned up some other minor + mistakes -- Restoring my copyright statements from pre-git era +- crypto: unify the generic functions - keyboard_interactive, 'exit-status' information packet, non-atomic read/write - under FreeBSD, multi-channel operation bugfixes. - -Daniel Stenberg (3 May 2010) -- pedantic: make the code C90 clean - -Peter Stuge (3 May 2010) -- Do proper keyboard-interactive user dialog in the sftp.c example + Added crypto.h that is the unified header to include when using crypto + functionality. It should be the only header that needs to adapt to the + underlying crypto library in use. It provides the set of prototypes that + are library agnostic. 
-Daniel Stenberg (3 May 2010) -- added to tarball: libssh2_knownhost_checkp.3 +- [Mark Smith brought this change] -- knownhost: support [host]:port in knownhost file + userauth: derive publickey from private - OpenSSH has ways to add hosts to the knownhosts file that include - a specific port number which makes the key associated with only - that specific host+port pair. libssh2 previously did not support - this, and I was forced to add a new function to the API to - properly expose this ability to applications: - libssh2_knownhost_checkp() + Pass a NULL pointer for the publickey parameter of + libssh2_userauth_publickey_fromfile and + libssh2_userauth_hostbased_fromfile functions. In this case, the + functions recompute the public key from the private key file data. - To *add* such hosts to the knownhosts file, you make sure to pass - on the host name in that manner to the libssh2_knownhost_addc() - function. - -- init/exit: mention these were added in 1.2.5 - -- libssh2_knownhost_check docs: correct the prototype - -- examples: avoid use of uninitialized variable 'sock' - -- KEX: stop pretending we negotiate language + This is work done by Jean-Louis CHARTON + , then adapted by Mark Smith and + slightly edited further by me Daniel. - There was some stub-like parts of an implementation for - implementing kex language negotiation that caused clang-analyzer - to warn and as it did nothing I've now removed the dead code. - -- Uninitialized argument - -- sftpdir: removed dead assignment - -- Makefile.am: include the VMS-specific config header as well - -- [Jose Baars brought this change] + WARNING: this does leave the feature NOT WORKING when libssh2 is built + to use libgcrypt instead of OpenSSL simply due to lack of + implementation. 
- Add VMS specific libssh2_config.h +- ssh2_echo: Value stored to 'exitcode' is never read -- fix Value stored to 's' is never read warning +- _libssh2_packet_add: fix SSH_MSG_DEBUG weirdness - and moved variable declaration of s to be more local + I believe I may have caused this weird typo style error when I cleaned + up this function a while ago. Corrected now. -- kexinit: simplify the code and avoid scan-build warning +- uint32: more longs converted to proper types - Previously it would say "Value stored to 's' is never read" due - fourth increment of 's'. - -Alexander Lamaison (28 Apr 2010) -- Removed unecessary brackets. - -- Changed sftp_attrsize macro to a static function. - -Daniel Stenberg (28 Apr 2010) -- release: include the VMS-specific files + I also moved the MAC struct over to the mac.h header file and made sure + that the users of that struct include that file. -- sftp_attrsize: protect the macro argument with proper parentheses +- SFTP: more types to uint32_t + + The 'num_names' field in the SSH_FXP_NAME response is an unsigned 32bit + value so we make sure to treat it like that. -- ssh2_agent: avoid using 'session' uninitialized on failures +- SFTP: request_ids are uint32_t + + I went over the code and made sure we use uint32_t all over for the + request_id data. It is an unsigned 32bit value on the wire. -- examples: remove assignments of variable rc that's never used +- SFTP: store request_id separately in packets + + By using a new separate struct for incoming SFTP packets and not sharing + the generic packet struct, we can get rid of an unused field and add a + new one dedicated for holding the request_id for the incoming + package. As sftp_packet_ask() is called fairly often, a "mere" integer + comparison is MUCH faster than the previous memcmp() of (typically) 5 + bytes. 
-- publickey_init: remove useless variable increment +- libssh2_sftp_open_ex: man page extended and cleaned up + + I added the missing documentation for the 'flags' argument. -- hostkey_method_ssh_rsa_init: remove useless variable increment +- SFTP: unify the READ/WRITE chunk structs -- packet_x11_open: removed useless variable increment +- SFTP: fix memory leaks - and made the declaration of a variable more local + Make sure that we cleanup remainders when the handle is closed and when + the subsystem is shutdown. + + Existing flaw: if a single handle sends packets that haven't been + replied to yet at the time when the handle is closed, those packets will + arrive later and end up in the generic packet brigade queue and they + will remain in there until flushed. They will use unnecessary memory, + make things slower and they will ruin the SFTP handling if the + request_id counter ever wraps (highly unlikely to every happen). -- packet_queue_listener: removed useless variable increment +- sftp_close_handle: packet list is generic - and made the declaration of a variable more local + Fix comment, simplify the loop logic -- sftp_read: move a read_responses array to where its used +- sftp_read: pipeline reads - I find that this increases readability since the array is used - only in the function call just immediately below and nowhere - else. + The SFTP read function now does transfers the same way the SFTP write + function was made to recently: it creates a list of many outgoing + FXP_READ packets that each asks for a small data chunk. The code then + tries to keep sending read request while collecting the acks for the + previous requests and returns the received data. 
-- sftp_readdir: turn a small array static const and move it +- sftp_write: removed unused variable -- sftp_attrsize: converted function to a macro +- _libssh2_channel_close: don't call transport read if disconnected + + The loop that waits for remote.close to get set may end up looping + forever since session->socket_state gets set to + LIBSSH2_SOCKET_DISCONNECTED by the packet_add() function called from the + transport_read() function and after having been set to + LIBSSH2_SOCKET_DISCONNECTED, the transport_read() function will only + return 0. - This way, the macro can evaluate a static number at compile time - for two out of four uses, and it probably runs faster for the - other two cases too. + Bug: http://trac.libssh2.org/ticket/198 -- sftp_open: deal with short channel_write calls +- libssh2_sftp_seek64: new man page - This was an old TODO that just wasn't done before. If - channel_write returns short, that is not an error. + Split off libssh2_sftp_seek64 from the libssh2_sftp_seek man page, and + mentioned that we consider the latter deprecated. Also added a mention + about the dangers of doing seek during writing or reading. -- sftp_open: clean up, better check of input data +- sftp_seek: fix - The clang-analyzer report made it look into this function and - I've went through it to remove a potential use of an - uninitialized variable and I also added some validation of input - data received from the server. + The new SFTP write code caused a regression as the seek function no + longer worked as it didn't set the write position properly. - In general, lots of more code in this file need to validate the - input before assuming it is correct: there are servers out there - that have bugs or just have another idea of how to do the SFTP - protocol. 
- -- bugfix: avoid using the socket if it failed to create one + It should be noted that seeking is STRONGLY PROHIBITED during upload, as + the upload magic uses two different offset positions and the multiple + outstanding packets etc make them sensitive to change in the midst of + operations. + + This functionality was just verified with the new example code + sftp_append. This bug was filed as bug #202: + + Bug: http://trac.libssh2.org/ticket/202 -- bugfix: potential use of NULL pointer +- sftp_append: new example doing SFTP append -- libssh2_userauth_password_ex: clarify errors somewhat +- MAX_SFTP_OUTGOING_SIZE: 30000 - The errors mentioned in this man page are possible return codes - but not necessarily the only return codes that this can return. + I ran SFTP upload tests against localhost. It showed that to make the + app reach really good speeds, I needed to do a little code tweak and + change MAX_SFTP_OUTGOING_SIZE from 4000 to 30000. The tests I did before + with the high latency tests didn't show any real difference whatever I + had that size set to. - Also reformatted the typ prototypes somewhat. + This number is the size in bytes that libssh2 cuts off the large input + buffer and sends off as an individual sftp packet. -- examples: fixed and made them more similar +- sftp_write_sliding.c: new example - The channel read/write functions can return 0 in legitimate cases - without it being an error, and we need to loop properly if they - return short. - -- [Jose Baars brought this change] - - VMS port of libssh2; changes in the libssh2 common code - -- Makefile: added the two news headers userauth.h and session.h - -- cleanup: prefer the internal functions + This is an example that is very similar to sftp_write_nonblock.c, with + the exception that this uses - To get the blocking vs non-blocking to work as smooth as possible - and behave better internally, we avoid using the external - interfaces when calling functions internally. 
+ 1 - a larger upload buffer + + 2 - a sliding buffer mechnism to allow the app to keep sending lots of + data to libssh2 without having to first drain the buffer. - Renamed a few internal functions to use _libssh2 prefix when not - being private within a file, and removed the libssh2_ for one - that was private within the file. + These are two key issues to make libssh2 SFTP uploads really perform + well at this point in time. -- session_free: remove dead code +- cpp: s/#elsif/#elif + + This looks like a typo as #elsif is not really C... -- libssh2_publickey_init: fixed to work better non-blocking +- _libssh2_channel_write: revert channel_write() use - This was triggered by a clang-analyzer complaint that turned out - to be valid, and it made me dig deeper and fix some generic non- - blocking problems I disovered in the code. + The attempts made to have _libssh2_channel_write() accept larger pieces + of data and split up the data by itself into 32700 byte chunks and pass + them on to channel_write() in a loop as a way to do faster operations on + larger data blocks was a failed attempt. - While cleaning this up, I moved session-specific stuff over to a - new session.h header from the libssh2_priv.h header. - -- channel: reduce duplicated free and returns + The reason why it is difficult: - Simplified the code by trying to free data and return on a single - spot. - -- channel: make variables more local + The API only allows EAGAIN or a length to be returned. When looping over + multiple blocks to get sent, one block can get sent and the next might + not. And yet: when transport_send() has returned EAGAIN we must not call + it again with new data until it has returned OK on the existing data it + is still working on. This makes it a mess and we do get a much easier + job by simply returning the bytes or EAGAIN at once, as in the EAGAIN + case we can assume that we will be called with the same arguments again + and transport_send() will be happy. 
- By making 'data' and 'data_len' more local in several places in - this file it will be easier to spot how they are used and we'll - get less risks to accidentally do bad things with them. - -Mikhail Gusarov (24 Apr 2010) -- Fix typos in manpages, catched by Lintian + Unfortunately, I think we take a small performance hit by not being able + to do this. -Daniel Stenberg (24 Apr 2010) -- channel_request_pty: simplify the code +- ssh2_echo: new example - clang-analyzer pointed out how 'data' could be accessed as a NULL - pointer if the wrong state was set, and while I don't see that - happen in real-life the code flow is easier to read and follow by - moving the LIBSSH2_FREE() call into the block that is supposed to - deal with the data pointer anyway. + This is a new example snippet. The code is largely based on ssh2_exec, + and is written by Tommy Lindgren. I edited it into C90 compliance and to + conform to libssh2 indent style and some more. -- libssh2_channel_process_startup: simplify the code +- send_existing: return after send_existing - clang-analyzer pointed out how 'data' could be accessed as a NULL - pointer if the wrong state was set, and while I don't see that - happen in real-life the code flow is easier to read and follow by - moving the LIBSSH2_FREE() call into the block that is supposed to - deal with the data pointer anyway. - -- sftp_close_handle: add precation to not access NULL pointer + When a piece of data is sent from the send_existing() function we must + make the parent function return afterwards. Otherwise we risk that the + parent function tries to send more data and ends up getting an EGAIN for + that more data and since it can only return one return code it doesn't + return info for the successfully sent data. - clang-analyzer pointed this out as a "Pass-by-value argument in - function call is undefined" but while I can't see exactly how - this can ever happen in reality I think a little check for safety - isn't such a bad thing here. 
- -- scp_write_nonblock: Value stored to 'nread' is never read - -- scp_write: Value stored to 'ptr' is never read + As this change is a regression I now added a larger comment explaining + why it has to work like this. -- scp_write_nonblock: Value stored to 'ptr' is never read +- _libssh2_channel_write: count resent data as written + + In the logic that resends data that was kept for that purpose due to a + previous EAGAIN, the data was not counted as sent causing badness. -- sftp_mkdir: less silly output but show failures +Peter Stuge (13 Nov 2010) +- Use fprintf(stderr, ) instead of write(2, ) for debugging -- [Jose Baars brought this change] +- session/transport: Correctly handle when _libssh2_send() returns -EAGAIN - VMS port of libssh2 including VMS specific build procedures +- src/agent.c: Simplify _libssh2_send() error checking ever so slightly -- two variable types changes, made lines less than 80 columns +Daniel Stenberg (12 Nov 2010) +- send/recv: use _libssh2_recv and _libssh2_send now - The two variable type changes are only to match type variable - fields actually read from the binary protocol. - -- remove check for negative padding_length + Starting now, we unconditionally use the internal replacement functions + for send() and recv() - creatively named _libssh2_recv() and + _libssh2_send(). - It was silly, since it is read as an unsigned char... - -- hostkey_method_ssh_dss_init: Value stored to 's' is never read - -- libssh2_banner_set: avoid unnecessary increment and explain code - -- agent_transact_unix: remove unused variable - -- remove two unnecessary increments - -- more code converted to use _libssh2_store_*() - -- libssh2_publickey_list_fetch: removed unused variables - -- libssh2_publickey_init: remove unused variables + On errors, these functions return the negative 'errno' value instead of + the traditional -1. This design allows systems that have no "natural" + errno support to not have to invent it. 
It also means that no code + outside of these two transfer functions should use the errno variable. -- libssh2_scp_send64: added to API to provide large file transfers +- channel_write: move some logic to _libssh2_channel_write - The previously existing libssh2_scp_send_ex() function has no way - to send files that are larger than 'size_t' which on 32bit - systems mean 4GB. This new API uses a libssh2_int64_t type and - should thus on most modern systems be able to send enormous - files. - -- sftp_init: remove unused variables and assignments - -- libssh2_knownhost_check: Value stored to 'keylen' is never read - -- hostkey: fix compiler warning - -- remove unused variable - -- data types: convert more to use size_t and uint32_t - -- channel: variable type cleanups + Some checks are better done in _libssh2_channel_write just once per + write instead of in channel_write() since the looping will call the + latter function multiple times per _libssh2_channel_write() invoke. -- cleanups: better binary packet gen, size_t fixes and PACKET_* removal - - I'll introduce a new internal function set named - - _libssh2_store_u32 - _libssh2_store_u64 - _libssh2_store_str +- sftp_write: handle "left over" acked data - That can be used all through the library to build binary outgoing - packets. Using these instead of the current approach removes - hundreds of lines from the library while at the same time greatly - enhances readability. I've not yet fully converted everything to - use these functions. + The SFTP handle struct now buffers number of acked bytes that haven't + yet been returned. The way this is used is as following: - I've converted LOTS of 'unsigned long' to 'size_t' where - data/string lengths are dealt with internally. This is The Right - Thing and it will help us make the transition to our - size_t-polished API later on as well. + 1. sftp_write() gets called with a buffer of let say size 32000. We + split 32000 into 8 smaller packets and send them off one by one. 
One of + them gets acked before the function returns so 4000 is returned. - I'm removing the PACKET_* error codes. They were originally - introduced as a set of separate error codes from the transport - layer, but having its own set of errors turned out to be very - awkward and they were then converted into a set of #defines that - simply maps them to the global libssh2 error codes instead. Now, - I'l take the next logical step and simply replace the PACKET_* - defines with the actual LIBSSH2_ERROR_* defines. It will increase - readability and decrease confusion. + 2. sftp_write() gets called again a short while after the previous one, + now with a much smaller size passed in to the function. Lets say 8000. + In the mean-time, all of the remaining packets from the previous call + have been acked (7*4000 = 28000). This function then returns 8000 as all + data passed in are already sent and it can't return any more than what + it got passed in. But we have 28000 bytes acked. We now store the + remaining 20000 in the handle->u.file.acked struct field to add up in + the next call. - I also separated packet stuff into its own packet.h header file. - -- clarified the return code + 3. sftp_write() gets called again, and now there's a backlogged 20000 + bytes to return as fine and that will get skipped from the beginning + of the buffer that is passed in. -- rename libssh2_error to the correct _libssh2_error +- sftp_write: polished and simplified - We reserve ^libssh2_ for public symbols and we use _libssh2 as - prefix for internal ones. I fixed the intendation of all these - edits with emacs afterwards, which then changed it slightly more - than just _libssh2_error() expressions but I didn't see any - obvious problems. - -- data type cleanup: made lots of code use size_t etc + Removed unnecessary struct fields and state changes within the function. 
- A lot of code used 'unsigned long' and the likes when it should - rather just use plain 'int' or use size_t for data lengths. - -- wait_socket: make c89 compliant and use two fd_sets for select() + Made the loop that checks for ACKs only check chunks that were fully + sent. -- sftp_readdir: always zero terminate, detail the return code +- SCP: on failure, show the numerical error reason - I also added a description for the 'longentry' field which was - previously undocumented! + By calling libssh2_session_last_errno() -- sftp_readdir: simplified and bugfixed - - This function no longer has any special purpose code for the - single entry case, as it was pointless. - - The previous code would overflow the buffers with an off-by-one - in case the file name or longentry data fields received from the - server were exactly as long as the buffer provided to - libssh2_sftp_readdir_ex. - - We now make sure that libssh2_sftp_readdir_ex() ALWAYS zero - terminate the buffers it fills in. - - The function no longer calls the libssh2_* function again, but - properly uses the internal sftp_* instead. +- SFTP: provide the numerical error reason on failure -- channel/transport: we now drain the outgoing send buffer when we ignore EAGAIN +- SCP: clean up failure treatment - When we ignore the EAGAIN from the transport layer within channel_write, we - now drain the outgoing transport layer buffer so that remainders in that - won't cause any problems in the next invoke of _libssh2_transport_write() + When SCP send or recv fails, it gets a special message from the server + with a warning or error message included. We have no current API to + expose that message but the foundation is there. Removed unnecessary use + of session struct fields. -- channel_write: if data has been sent, don't return EAGAIN - - When sending data in a loop, we must not return EAGAIN if we - managed to send data in an earlier round. 
This was reported in - bug #126 => http://libssh2.stuge.se/ticket/126 +- sftp_write: enlarge buffer to perform better -Simon Josefsson (14 Apr 2010) -- Fix OpenSSL AES-128-CTR detection. +- packets: code cleanup - Patch from Paul Howarth . - -Daniel Stenberg (13 Apr 2010) -- version in header file now says 1.2.6-DEV + I added size checks in several places. I fixed the code flow to be easier + to read in some places. + + I removed unnecessary zeroing of structs. I removed unused struct fields. -- 1.2.6: clean the RELEASE-NOTES for next release round +- LIBSSH2_CALLBACK_MACERROR: clarify return code use -- NEWS: add the stuff from the version 1.2.5 RELEASE-NOTES +- _libssh2_userauth_publickey: avoid shadowing -Version 1.2.5 (13 Apr 2010) +- packet: avoid shadowing global symbols -Daniel Stenberg (13 Apr 2010) -- channel_close: no longer wait for the SSH_MSG_CHANNEL_CLOSE message - - As the packet may simply not arrive we cannot have the close - function wait for it unconditionally. +- sftp_readdir: avoid shadowing -- less code duplication in the poll vs select code flows - - libssh2_keepalive_send and libssh2_session_block_directions are - now used outside of the #ifdef blocks. +- shadowing: don't shadow the global compress -- make it C90 compliant +- _libssh2_packet_add: turn ifs into a single switch -- updated with all changes and bugs since 1.2.4 +- _libssh2_packet_add: check SSH_MSG_GLOBAL_REQUEST packet -- Added LIBSSH2_SFTP_S_IS***() macros and updated docs +- _libssh2_packet_add: SSH_MSG_DEBUG length checks - libssh2_sftp_fstat_ex.3 is now extended quite a lot to describe a - lot of the struct and the bits it uses and how to test for them. + Verify lengths before using them. Read always_display from the correct + index. Don't copy stuff around just to provide zero-termination of the + strings. 
-- sftp_init() deal with _libssh2_channel_write() returns short +- _libssh2_packet_add: SSH_MSG_IGNORE skip memmove - When _libssh2_channel_write() is asked to send off 9 bytes, the - code needs to deal with the situation where less than 9 bytes - were sent off and prepare to send the remaining piece at a later - time. + There's no promise of a zero termination of the data in the callback so + no longer perform ugly operation in order to provide it. -- handle a NULL password as if it was "" +- _libssh2_packet_add: SSH_MSG_DISCONNECT length checks - libssh2_userauth_publickey_fromfile_ex() takes a "passphrase" - but didn't deal with it being set to NULL. + Verify lengths before trying to read data. -- Reduce used window sizes by factor 10 - - As reported in bug report #166 http://libssh2.stuge.se/ticket/166 - by 'ptjm', the maximum window size must be less crazy for libssh2 - to do better with more server implementations. I did not do any - testing to see how this changes raw SCP performance, but the - maximum window size is still almost 4MB. This also has the upside - that libssh2 will use less memory. +- indent: break lines at 80 columns -Peter Stuge (28 Mar 2010) -- Correctly clear blocking flag after sending multipart packet - - commit 7317edab61d2179febc38a2c2c4da0b951d74cbc cleared the outbound - blocking bit when send_existing() returned PACKET_NONE and *ret=0, as - opposed to before even calling send_existing(), but because *ret=1 when - sending parts 2..n of an existing packet, the bit would only be cleared - when calling libssh2_transport_write() for a new packet. +- SSH_MSG_CHANNEL_OPEN_FAILURE: used defined values - Clear the direction flag after the final part of a packet has been sent. + We don't like magic numbers in the code. Now the acceptable failure + codes sent in the SSH_MSG_CHANNEL_OPEN_FAILURE message are added as + defined values in the private header file. 
-Daniel Stenberg (24 Mar 2010) -- Added man page for libssh2_knownhost_addc() +- sftp_write: don't return EAGAIN if no EAGAIN was received - Added mention in libssh2_knownhost_add() docs that - libssh2_knownhost_addc() is the preferred function now. - -- at next soname bump remove libssh2_knownhost_add() - -- ignore TAGS ("make tags" makes them) + This function now only returns EAGAIN if a lower layer actually returned + EAGAIN to it. If nothing was acked and no EAGAIN was received, it will + now instead return 0. -- fix memory leak +- _libssh2_wait_socket: detect nothing-to-wait-for - we must not assign the pointer a NULL since it keeps allocated - data and at least parts of an error string + If _libssh2_wait_socket() gets called but there's no direction set to + wait for, this causes a "hang". This code now detects this situation, + set a 1 second timeout instead and outputs a debug output about it. -- fixed the pattern for avoiding the poll check +- decomp: remove the free_dest argument - added some comments about known problems with poll on darwin + Since the decompress function ALWAYS returns allocated memory we get a + lot simpler code by removing the ability to return data unallocated. -- avoid checking for poll on some systems +- decomp: cleaned off old compression stuff - darwin and interix are known to have broken poll implementations - so we skip the check on those and thus have them use select - unconditionally + I cleared off legacy code from when the compression and decompression + functions were a single unified function. Makes the code easier to read + too. -- ignore libssh2.dsp +- [TJ Saunders brought this change] -Simon Josefsson (23 Mar 2010) -- Fix logic in "on-the-fly" crypto init. + decomp: increase decompression buffer sizes -- Make sure keepalive is working even when poll is used. +- [TJ Saunders brought this change] -- [Paul Querna brought this change] + zlib: Add debug tracing of zlib errors - Use poll when available on blocking API. 
+- sftp_packet_read: handle partial reads of the length field - Signed-off-by: Simon Josefsson + SFTP packets come as [32 bit length][payload] and the code didn't + previously handle that the initial 32 bit field was read only partially + when it was read. -Peter Stuge (20 Mar 2010) -- Fix speling +- [Jasmeet Bagga brought this change] -Daniel Stenberg (19 Mar 2010) -- fix NULL dereference when window adjusting a non-existing channel + kex_agree_hostkey: fix NULL pointer derefence + + While setting up the session, ssh tries to determine the type of + encryption method it can use for the session. This requires looking at + the keys offered by the remote host and comparing these with the methods + supported by libssh2 (rsa & dss). To do this there is an iteration over + the array containing the methods supported by libssh2. - Suyog Jadhav pointed out that when receiving a window adjust to - a channel not found, the code would reference a NULL pointer. - Now it will instead output a message about that fact. + If there is no agreement on the type of encryption we come to the 3rd + entry of the hostkeyp array. Here hostkeyp is valid but *hostkep is + NULL. Thus when we dereference that in (*hostkeyp)->name there is a + crash -Simon Josefsson (19 Mar 2010) -- Fix build problem. +- _libssh2_transport_send: remove dead assignment + + 'data' isn't accessed beyond this point so there's no need to assign it. -- Eat our own dog food, call libssh2_init and libssh2_exit in the examples. +- scp_recv: remove dead assignment + + Instead of assigning a variable we won't read, we now use the more + explicit (void) prefix. -- Fix init/exit logic. Add self-test of it. +- sftp_write: removed superfluous assignment -Daniel Stenberg (19 Mar 2010) -- fix typo +- bugfix: avoid use of uninitialized value -Simon Josefsson (19 Mar 2010) -- Add man page for libssh2_init and libssh2_exit. Fix libssh2_exit prototype. 
+- sftp_packet_require: propagate error codes better + + There were some chances that they would cause -1 to get returned by + public functions and as we're hunting down all such occurances and since + the underlying functions do return valuable information the code now + passes back proper return codes better. -- Shorten constant a bit. More documentation. +- [Alfred Gebert brought this change] -- Fix namespace pollution. + fix memory leaks (two times cipher_data) for each sftp session -- Add global init/exit points, to do crypto initialization in one place. +- libssh2_userauth_authenticated: make it work as documented - By Lars Nordin. - -Daniel Stenberg (14 Mar 2010) -- libssh2 is released under the Modifed BSD license, not GPL - -Alexander Lamaison (14 Mar 2010) -- Add libssh2_knownhost_addc to handle comments. + The man page clearly says it returns 1 for "already authenticated" but + the code said non-zero. I changed the code to use 1 now, as that is also + non-zero but it gets the benefit that it now matches the documentation. - Comments in known_hosts file were not handle properly. They were parsed as - part of the key causing key matching to return a mismatch if the entry had a - comment. This adds a new API function that takes an optional comment and - changes libssh2_knownhost_readline to parse the comment as pass it to the - new function. + Using 1 instead of non-zero is better for two reasons: - Fixes #164. + 1. We have the opportunity to introduce other return codes in the future for + things like error and what not. + 2. We don't expose the internal bitmask variable value. -- Fix gettimeofday to compile with Visual C++ 6. - - Reported by Steven Van Ingelgem. +- userauth_keyboard_interactive: fix indent -Simon Josefsson (10 Mar 2010) -- Add. +- [Alfred Gebert brought this change] -- keepalive.c: Fix libssh2_error usage. 
+ fix memory leak in userauth_keyboard_interactive() + + First I wanted to free the memory in session_free() but then + I had still memory leaks because in my test case the function + userauth_keyboard_interactive() is called twice. It is called + twice perhaps because the server has this authentication + methods available: publickey,gssapi-with-mic,keyboard-interactive + The keyboard-interactive method is successful. -- Fix typo in last commit. +- dist: include sftp.h in dist archives -- Tidy up build option notice. +Simon Josefsson (27 Oct 2010) +- Update header to match new function prototype, see c48840ba88. -- Add entry about keep alive stuff. +Daniel Stenberg (26 Oct 2010) +- bugfixes: the transport rearrange left some subtle flaws now gone -- Add keep-alive support. +- libssh2_userauth_publickey_fromfile_ex.3: cleaned up looks -Alexander Lamaison (7 Mar 2010) -- Untabify. +- libssh2_userauth_publickey: add man page + + I found an undocumented public function and we can't have it like + that. The description here is incomplete, but should serve as a template + to allow filling in... -- Fix memory leak in libssh2_knownhost_add. 
+- libssh2_sftp_write.3: added blurb about the "write ahead" + + Documented the new SFTP write concept -Daniel Stenberg (6 Mar 2010) -- change 'int' to 'libssh2_socket_t' in the public API for sockets +- sftp_close_handle: free any trailing write chunks -- reduce code duplication and return underlying error better +- _libssh2_channel_write: fix warnings -- acknowledge when _libssh2_packet_requirev() returns error +- SFTP: bufgix, move more sftp stuff to sftp.h - when _libssh2_packet_requirev() returns an error when waiting for - SSH_MSG_USERAUTH_SUCCESS or SSH_MSG_USERAUTH_FAILURE, it is an - error and it should be treated as such - -- wrap long lines - -- polished the phrasing in two error strings + The sftp_write function shouldn't assume that the buffer pointer will be + the same in subsequent calls, even if it assumes that the data already + passed in before haven't changed. + + The sftp structs are now moved to sftp.h (which I forgot to add before) -- silence picky compiler warnings +- SFTP: use multiple outgoing packets when writing + + sftp_write was rewritten to split up outgoing data into multiple packets + and deal with the acks in a more asynchronous manner. This is meant to + help overcome latency and round-trip problems with the SFTP protocol. -- silence picky compiler warnings +- TODO: implemented a lot of the ideas now -- removed libssh2_error()'s forth argument +- _libssh2_channel_write: removed 32500 size limit - libssh2_error() no longer allocates a string and only accepts a const - error string. I also made a lot of functions use the construct of - return libssh2_error(...) instead of having one call to - libssh2_error() and then a separate return call. In several of those - cases I then also changed the former -1 return code to a more - detailed one - something that I think will not change behaviors - anywhere but it's worth keeping an eye open for any such. 
- -- repaired --enable-debug + Neither _libssh2_channel_write nor sftp_write now have the 32500 size + limit anymore and instead the channel writing function now has its own + logic to send data in multiple calls until everything is sent. -Simon Josefsson (1 Mar 2010) -- Make ./configure output a summary of build options. +- send_existing: don't tell parent to return when drained + + That will just cause unnecessary code execution. -Daniel Stenberg (1 Mar 2010) -- let the err_msg in the session struct be const too +- _libssh2_channel_write: general code cleanup + + simplified the function and removed some unused struct fields -Simon Josefsson (1 Mar 2010) -- Revert #ifdef change that pulled in AES-CTR code when explicitly disabled. +- _libssh2_transport_send: replaces _libssh2_transport_write + + The new function takes two data areas, combines them and sends them as a + single SSH packet. This allows several functions to allocate and copy + less data. + + I also found and fixed a mixed up use of the compression function + arguments that I introduced in my rewrite in a recent commit. -Daniel Stenberg (1 Mar 2010) -- fix #ifdefs +- scp_write_nonblock: use select() instead of busyloop + + Make this example nicer by not busylooping. -- make function match the new proto +- send_existing: clear olen when the data is sent off -Simon Josefsson (1 Mar 2010) -- Improve AES-CTR check. +- _libssh2_transport_write: allow 256 extra bytes around the packet -Daniel Stenberg (1 Mar 2010) -- use const to silence a bazillion warnings +- _libssh2_transport_write: remade to send without malloc -Simon Josefsson (1 Mar 2010) -- Use AES-CTR from OpenSSL when available. +- compress: compression disabled by default - Reported by Lars Nordin . + We now allow libssh2_session_flag() to enable compression with a new + flag and I added documentation for the previous LIBSSH2_FLAG_SIGPIPE + flag which I wasn't really aware of! -- Make it possible to disable DSA. 
+- comp: split the compress function - Patch from Lars Nordin . + It is now made into two separate compress and decompress functions. In + preparation for upcoming further modficications. -Peter Stuge (1 Mar 2010) -- Send and receive channel EOF before sending SSH_MSG_CHANNEL_CLOSE - - Sending SSH_MSG_CHANNEL_CLOSE without channel EOF is explicitly allowed - in RFC 4254, but some non-conforming servers will hang or time out when - the channel is closed before EOF. - - Other common clients send and receive EOF before closing, there are no - drawbacks, and some servers need it to work correctly. +Dan Fandrich (20 Oct 2010) +- Added header file to allow compiling in older environments -Alexander Lamaison (26 Feb 2010) -- Style improvements to knownhost error handling. - - Made improvements as suggested by Peter Stuge: http://www.libssh2.org/mail/libssh2-devel-archive-2010-02/0161.shtml. +Daniel Stenberg (20 Oct 2010) +- TODO: add a possible new API for SFTP transfers -- Call libssh2_error for every knownhost API failure. - - The libssh2 API calls should set the last error code and a message when - returning a failure by calling libssh2_error. This changeset adds these - calls to the libssh2_knownhost_* API as well as libssh2_base64_decode. - - This change also makes libssh2_error into a function rather than a macro. - Its implementation is moved to misc.c. This function returns the error - code passed to it allowing callers to return the error value directly - without duplicating the error code. +- TODO: "New Transport API" added -- Fix LIBSSH2_ALLOC checks. - - These appear to be cut-and paste errors where the wrong variable is checked - for NULLness after calling LIBSSH2_ALLOC. +- TODO: add buffering plans -Simon Josefsson (23 Feb 2010) -- Silence compiler warning. +Simon Josefsson (13 Oct 2010) +- Mention libssh2_channel_get_exit_signal and give kudos. -- Make it portable; test uses = for string comparison (not ==). Indent. 
+- [Tommy Lindgren brought this change] -Alexander Lamaison (22 Feb 2010) -- libssh2_knownhost_del: fix write to freed memory. - - When removing a known host, libssh2_knownhost_del would remove the node from the linked list, free its memory and then overwrite the struct parameter (which indicated which node to remove) with 0. However, this struct is actually allocated within the just-freed node meaning we're writing to freed memory. This made Windows very upset. + Add libssh2_channel_get_exit_signal man page. - The fix is simply to overwrite the struct first before freeing the memory. + Signed-off-by: Simon Josefsson -Daniel Stenberg (21 Feb 2010) -- show more verbose error when SCP send fails +- [Tommy Lindgren brought this change] -- libssh2_socket_t is done, a library-free function is needed + Add libssh2_channel_get_exit_signal. + + Signed-off-by: Simon Josefsson -- clarify that this frees all data associated with a session +- Add libssh2_free man page and fix typo. -- improved error handling +- Add libssh2_free. -- add missing libssh2_error() calls +Daniel Stenberg (11 Oct 2010) +- scp_recv: improved treatment of channel_read() returning zero - To make sure the public API is functional and that the - BLOCK_ADJUST_ERRNO() macro works correctly we MUST make sure to - call libssh2_error() when we return errors. + As a zero return code from channel_read() is not an error we must make + sure that the SCP functions deal with that properly. channel_read() + always returns 0 if the channel is EOFed already so we check for EOF + after 0-reads to be able to return error properly. + +- libssh2_session_methods.3: detail what can be asked for -- fix memory leak in userauth_keyboard_interactive() +- compression: send zlib before none - Mr anonymous in bug #125 pointed out that the userauth_keyboard_interactive() - function does in fact assign the same pointer a second time to a new allocated - buffer without properly freeing the previous one, which caused a memory leak. 
+ As the list of algorithms in a preferred order we should send zlib + before none to increase the chances that the server will let us do + compression. -- added missing error codes +- compress: faster check, better return codes - To allow the libssh2_session_last_error() function to work as - documented, userauth_password() now better makes sure to call - libssh2_error() everywhere before it returns error. + In the transport functions we avoid a strcmp() now and just check a + boolean instead. - Pointed out by mr anonymous in bug #128 + The compress/decompress function's return code is now acknowledged and + used as actual return code in case of failures. -Peter Stuge (16 Feb 2010) -- Fix resource and memory leaks in examples as reported by cppcheck +- libssh2_session_handshake: replaces libssh2_session_startup() - Thanks to Ettl Martin for the report and patch. This fixes #132 - -Daniel Stenberg (15 Feb 2010) -- mention the new man pages for macros + The function libssh2_session_startup() is now considered deprecated due + to the portability issue with the socket argument. + libssh2_session_handshake() is the name of the replacement. -- added man pages for API macros +- libssh2_socket_t: now externally visible - all #defined macros in the public headers are considered to be part - of the API and I've generated individual man pages for each of them - to A) make it easier to figure out what each function/macro actually - is for so that automated lookups work better and for B) make sure we - have all public functions document (both macros and functions) to - make it easier for us to work away from all the macros in a future - release. + In preparation for upcominig changes, the libssh2_socket_t type is now + typedef'ed in the public header. 
-- Committed the patch by Yoichi Iwaki in bug #2929647 +- _libssh2_transport_drain: removed - Committed the patch by Yoichi Iwaki in bug #2929647, which fixed a memory - leak when an 'outbuf' was still allocated when a session was freed. + This function proved not to be used nor useful. -- free "outbuf" when killing a session +- _libssh2_channel_write: don't iterate over transport writes - Fix memoary leak: if there was an "output" still allocated when a - session was torn down it needs to be freed in session_free() + When a call to _libssh2_transport_write() succeeds, we must return from + _libssh2_channel_write() to allow the caller to provide the next chunk + of data. - Patch by Yoichi Iwaki in bug #2929647 - -- the working version name is now 1.2.5_DEV - -Version 1.2.4 (13 Feb 2010) - -Daniel Stenberg (13 Feb 2010) -- updated info for 1.2.4 - -Dan Fandrich (10 Feb 2010) -- Allow compiling with OpenSSL when AES isn't available. + We cannot move on to send the next piece of data that may already have + been provided in this same function call, as we risk getting EAGAIN for + that and we can't return information both about sent data as well as + EAGAIN. So, by returning short now, the caller will call this function + again with new data to send. -Peter Stuge (9 Feb 2010) -- [Dave McCaldon brought this change] +- _libssh2_transport_write: updated documentation blurb - Fix Tru64 socklen_t compile issue with example/direct_tcpip.c - - Building libssh2-1.2.3 on Tru64 fails at line 48 and 166 because socklen_t - isn't defined on Tru64 unless _POSIX_PII_SOCKET is defined. +- _libssh2_transport_write: remove fprintf remainder - This patch updates configure.ac to add -D_POSIX_PII_SOCKET when building - on Tru64 platform(s). 
- -- [Dave McCaldon brought this change] + Mistake from previous debugging - Resolve compile issues on Solaris x64 and UltraSPARC +- session: improved errors - Solaris builds of libssh2-1.2.3 failed on both x64 and UltraSPARC - platforms because of two problems: + Replaced -1/SOCKET_NONE errors with appropriate error defines instead. - 1) src/agent.c:145 sun is a reserved word when using the SUNWspro compiler - 2) example/direct_tcpip.c:84 INADDR_NONE is not defined - -Daniel Stenberg (3 Feb 2010) -- towards 1.2.4 now - -Version 1.2.3 (3 Feb 2010) + Made the verbose trace output during banner receiving less annoying for + non-blocking sessions. -Daniel Stenberg (3 Feb 2010) -- Version 1.2.3 (February 3, 2010) +- crypt_init: use correct error define -- fix building out of source tree by proving better include path +- _libssh2_error: hide EAGAIN for non-blocking sessions - when building out of source tree, we provide -I$(top_builddir)/example - since the libssh2_config.h gets generated in that dir - -Peter Stuge (1 Feb 2010) -- [Sofian Brabez brought this change] + In an attempt to make the trace output less cluttered for non-blocking + sessions the error function now avoids calling the debug function if the + error is the EAGAIN and the session is non-blocking. - Replace : in hexdump with " " (two spaces) +- agent: use better error defines -- Detect when the forwarded connection is closed in example/direct_tcpip.c +- comp_method_zlib_init: use correct error defines -- Fix example/direct_tcpip.c to work also on WIN32 +- transport: better error codes - read() and write() are no good for WIN32 sockets, use recv() and send(). - -- Ignore libssh2_config.h.in and stamp-h2 in example/ and remove .cvsignore - -- Simplify WIN32 ifdefs in example/direct_tcpip.c to allow standalone compile + LIBSSH2_SOCKET_NONE (-1) should no longer be used as error code as it is + (too) generic and we should instead use specific and dedicated error + codes to better describe the error. 
-- Always #define INVALID_SOCKET -1 in libssh2_priv.h when not on win32 +- channel: return code and _libssh2_error cleanup - Fix broken builds since commit abd9bd0bbe631efeada1f54552c70b54e1c490c1 - for all non-win32 platforms. - -- Include hmac-md5 and hmac-md5-96 only if crypto backend supports MD5 + Made sure that all transport_write() call failures get _libssh2_error + called. -- Use LIBSSH2_HOSTKEY_HASH_SHA1 instead of _MD5 in examples and tests +- _libssh2_channel_write: limit to 32700 bytes - MD5 support is optional and may not always be available, while SHA1 is both - required and recommended. - -- Update mailing list address in configure.ac to @cool.haxx.se - -- Make example/direct_tcpip.c compile for win32 + The well known and used ssh server Dropbear has a maximum SSH packet + length at 32768 by default. Since the libssh2 design current have a + fixed one-to-one mapping from channel_write() to the packet size created + by transport_write() the previous limit of 32768 in the channel layer + caused the transport layer to create larger packets than 32768 at times + which Dropbear rejected forcibly (by closing the connection). - One warning from FD_SET() remains, it is also in some other examples. - -- Correctly check for an invalid socket in session_startup() - -- Small documentation fix after Dave's _USERAUTH_FAILURE improvement + The long term fix is of course to remove the hard relation between the + outgoing SSH packet size and what the input length argument is in the + transport_write() function call. 
-- [Dave McCaldon brought this change] +- libssh.h: add more dedicated error codes - Handle SSH_MSG_USERAUTH_FAILURE for password and kbd-int authentication +- SCP: allow file names with bytes > 126 - Neither libssh2_userauth_password_ex() nor - libssh2_userauth_keyboard_interactive_ex() would return a login failure - error if the server responded with a SSH_MSG_USERAUTH_FAILURE, instead - you would see whatever previous error had occurred, typically - LIBSSH2_ERROR_EAGAIN. + When parsing the SCP protocol and verifying that the data looks like a + valid file name, byte values over 126 must not be consider illegal since + UTF-8 file names will use such codes. - This patch changes error code -18 to LIBSSH2_ERROR_AUTHENTICATION_FAILED - and makes LIBSSH2_ERROR_PUBLICKEY_UNRECOGNIZED an alias for - LIBSSH2_ERROR_AUTHENTICATION_FAILED. In addition, new logic in - userauth_password() properly handles SSH_MSG_USERAUTH_FAILURE and both - this function and userauth_keyboard_interactive() now properly return - LIBSSH2_ERROR_AUTHENTICATION_FAILED. - -Simon Josefsson (28 Jan 2010) -- Fix. + Reported by: Uli Zappe + Bug: http://www.libssh2.org/mail/libssh2-devel-archive-2010-08/0112.shtml -- Also deal with GLOBAL_REQUEST keep-alives. +Dan Fandrich (25 Aug 2010) +- Document the three sftp stat constants -- Make OpenSSH-style keepalive work against libssh2 clients. +Guenter Knauf (18 Aug 2010) +- Fixed Win32 makefile which was now broken at resource build. -Daniel Stenberg (27 Jan 2010) -- clarified +- It is sufficient to pipe stderr to NUL to get rid of the nasty messages. -Peter Stuge (26 Jan 2010) -- [Dave McCaldon brought this change] +- [Author: Guenter Knauf brought this change] - Fix trace context lookup in libssh2_debug() - - The trace context is actually a bitmask so that tracing output can be - controlled by setting a bitmask using libssh2_trace(). 
However, the logic - in libssh2_debug() that converted the context to a string was using the - context value as an array index. Because the code used a bounds check on - the array, there was never a danger of a crash, but you would certainly - either get the wrong string, or "unknown". + Removed Win32 ifdef completely for sys/uio.h. - This patch adds a lookup that iterates over the context strings and uses - it's index to check for the corresponding bit in the context. - -- Fix typo in RELEASE-NOTES - -Daniel Stenberg (20 Jan 2010) -- updated for 1.2.3 with all the stuff I found in the log + No idea why we had this ifdef at all but MSVC, MingW32, Watcom + and Borland all have no sys/uio.h header; so if there's another + Win32 compiler which needs it then it should be added explicitely + instead of this negative list. -- ignore more generated files +- New files should also be added to Makefile.am. + + Otherwise they will never be included with release and snapshot tarballs ... -- [Dave McCaldon brought this change] +Daniel Stenberg (18 Aug 2010) +- version: bump to 1.2.8_DEV - Pass user context through libssh2_trace_sethandler() to callback - - The libssh2_trace_sethandler() call allows the user to handle the output of libssh2 rather than having it written to stderr. This patch updates libssh2_trace_sethandler() to allow a user-defined void* context value to be passed back to the output handler. +Version 1.2.7 (17 Aug 2010) -- [Dave McCaldon brought this change] +Daniel Stenberg (17 Aug 2010) +- release: updated to hold 1.2.7 info - Add libssh2_trace_sethandler() to the API (even more) +Guenter Knauf (17 Aug 2010) +- Use the new libssh2.rc file. -- [Dave McCaldon brought this change] +- Added resource file for libssh2.dll (shamelessly stolen from libcurl). - Add libssh2_trace_sethandler() to the API +- Updated Win32 MSVC dependencies versions. 
-- cleanup includes - - We now produce a local libssh2_config.h file in this dir for the - examples to use so I cleaned up the include path at the same time. +- Added include for sys/select.h to get fd.set on some platforms. -- generate a libssh2_config.h in the example dir +- Added Watcom makefile borrowed from libcurl. - buildconf copies the template to example/ and configure makes sure - to generate a proper file from it and the direct_tcpip.c example - is the first one to use it - to make sure it builds fine on more - paltforms - -Simon Josefsson (13 Jan 2010) -- Remove redundant #includes and reorder sys/types.h include. + This makefile compiles already all files fine for static lib, but needs + final touch when I have OpenSSL fully working with shared libs and Watcom. -Daniel Stenberg (10 Jan 2010) -- avoid a free(NULL) +- Added copyright define to libssh2.h and use it for binary builds. -Simon Josefsson (7 Jan 2010) -- Make it simpler to get more debug info. +- Moved version defines up in order to include from .rc file. + + Blocked rest of header with ifndef so its possible to let + the rc compiler only use the version defines. -Daiki Ueno (1 Jan 2010) -- Simplify the commit 63457dfa using type cast from size_t * to ulong *. +- Some minor makefile tweaks. -Alexander Lamaison (30 Dec 2009) -- Fixed memory leak in userauth_publickey(). +Daniel Stenberg (2 Aug 2010) +- example: treat the libssh2_channel_read() return code properly - userauth_publickey_fromfile() reads the key from a - file using file_read_publickey() which returns two - allocated strings, the decoded key and the key - method (such as "ssh-dss"). The latter can be - derived from the former but returning both avoids a - later allocation while doing so. + A short read is not an error. Only negative values are errors! 
+ +- libssh2_wait_socket: reset error code to "leak" EAGAIN less - Older versions of userauth_publickey_fromfile() used - this method string directly but when - userauth_publickey() was factored out of - userauth_publickey_fromfile() it derived the method - from the key itself. This resulted in the method - being allocated twice. + Since libssh2 often sets LIBSSH2_ERROR_EAGAIN internally before + _libssh2_wait_socket is called, we can decrease some amount of + confusion in user programs by resetting the error code in this function + to reduce the risk of EAGAIN being stored as error when a blocking + function returns. + +- _libssh2_wait_socket: poll needs milliseconds - This fix, which maintains the optimisation that - avoids an extra allocation, changes - userauth_publickey() so it doesn't allocate and - derive the method when userauth_pblc_method already - has a value. + As reported on the mailing list, the code path using poll() should + multiple seconds with 1000 to get milliseconds, not divide! - Signed-off-by: Alexander Lamaison - -Daiki Ueno (25 Dec 2009) -- Fix the return value description of libssh2_knownhost_free(). - -- Fix compiler warnings for size_t pointers on 32-bit Windows. + Reported by: Jan Van Boghout -- Define INVALID_SOCKET and use it instead of SOCKET_BAD. +- typedef: make ssize_t get typedef without LIBSSH2_WIN32 - Revert the part of previous commit that defines SOCKET_BAD library wide. - -- Use libssh2_socket_t in the ssh-agent stuff. - Define a portability macro SOCKET_BAD which means "invalid socket". - -- Mark/unmark connection to Pageant is open/close. + The condition around the ssize_t typedef depended on both LIBSSH2_WIN32 + *and* _MSC_VER being defined when it should be enough to depend on + _MSC_VER only. It also makes it nicer so libssh2-using code builds fine + without having custom defines. -- Add test to check if the socket is connected. 
+- [John Little brought this change] -Peter Stuge (24 Dec 2009) -- Add libssh2.pc to top-level .gitignore + session_free: free more data to avoid memory leaks -- Fix publickey authentication regression +- channel_free: ignore problems with channel_close() - Commit 70b199f47659a74b8778c528beccf893843e5ecb introduced a parsing - bug in file_read_publickey() which made the algorithm name contain an - extra trailing space character, breaking all publickey authentication. - -- Add a direct-tcpip example which shows local port forwarding - -- Add session parameter and LIBSSH2_TRACE_SOCKET to libssh2_trace(3) man page - -- Add TODO: Expose error messages sent by the server - -Daiki Ueno (23 Dec 2009) -- Fix doc comments. - -- Add man pages for ssh-agent API. - -- Don't request userauthlist after authentication. - -Simon Josefsson (21 Dec 2009) -- Add. - -- [Daiki Ueno brought this change] - - Add an example to use ssh-agent API. + As was pointed out in bug #182, we must not return failure from + _libssh2_channel_free() when _libssh2_channel_close() returns an error + that isn't EAGAIN. It can effectively cause the function to never go + through, like it did now in the case where the socket was actually + closed but socket_state still said LIBSSH2_SOCKET_CONNECTED. - Signed-off-by: Simon Josefsson - -- [Daiki Ueno brought this change] + I consider this fix the right thing as it now also survives other + errors, even if making sure socket_state isn't lying is also a good + idea. - Add ssh-agent API. +- publickey_list_free: no return value from a void function - Signed-off-by: Simon Josefsson + Fixed a compiler warning I introduced previously when checking input + arguments more. I also added a check for the other pointer to avoid NULL + pointer dereferences. -- [Daiki Ueno brought this change] +- [Lars Nordin brought this change] - Add callback-based API for publickey auth. 
+ openssl: make use of the EVP interface - Signed-off-by: Simon Josefsson - -- Move examples from example/simple to example/. + Make use of the EVP interface for the AES-funktion. Using this method + supports the use of different ENGINES in OpenSSL for the AES function + (and the direct call to the AES_encrypt should not be used according to + openssl.org) -- Move examples from example/simple to example/. +Peter Stuge (23 Jun 2010) +- [Tor Arntsen brought this change] -Daniel Stenberg (17 Dec 2009) -- _libssh2_list_insert() fixed to work + Don't overflow MD5 server hostkey - While this is code not currently in use, it is part of the generic linked - list code and since I found the error I thought I'd better fix it since we - might bring in this function into the code one day. + Use SHA_DIGEST_LENGTH and MD5_DIGEST_LENGTH in memcpy instead of hardcoded + values. An incorrect value was used for MD5. -Simon Josefsson (16 Dec 2009) -- Silence compiler warnings. +- Fix message length bugs in libssh2_debug() - Based on patch by Kamil Dudka in - . - -- [Kamil Dudka brought this change] - - libgcrypt: simplify code of _libssh2_dsa_sha1_sign + There was a buffer overflow waiting to happen when a debug message was + longer than 1536 bytes. - Signed-off-by: Simon Josefsson - -- [Kamil Dudka brought this change] + Thanks to Daniel who spotted that there was a problem with the message + length passed to a trace handler also after commit + 0f0652a3093111fc7dac0205fdcf8d02bf16e89f. - libgcrypt: follow-up for ssh-dss padding fix +- Make libssh2_debug() create a correctly terminated string - Signed-off-by: Simon Josefsson + Also use FILE *stderr rather than fd 2, which can very well be something + completely different. -Dan Fandrich (15 Dec 2009) -- Check for the right environment variable in the test app +Daniel Stenberg (23 Jun 2010) +- [TJ Saunders brought this change] -Simon Josefsson (14 Dec 2009) -- Silence warning about unused function parameter. 
+ handshake: Compression enabled at the wrong time - Reported by Steven Van Ingelgem . - -Daniel Stenberg (10 Dec 2009) -- avoid returning data to memory already freed + In KEXINIT messages, the client and server agree on, among other + things, whether to use compression. This method agreement occurs + in src/kex.c's kex_agree_methods() function. However, if + compression is enabled (either client->server, server->client, or + both), then the compression layer is initialized in + kex_agree_methods() -- before NEWKEYS has been received. - In case of failure we must make sure that the data we return - doesn't point to a memory area already freed. Reported anonymously - in the bug report #2910103. - -Peter Stuge (8 Dec 2009) -- Use LIBSSH2_TRACE_* internally and remove redundant LIBSSH2_DBG_* - -- Add LIBSSH2_TRACE_SOCKET context for tracing send() and recv() + Instead, the initialization of the compression layer should + happen after NEWKEYS has been received. This looks to occur + insrc/kex.c's diffie_hellman_sha1(), which even has the comment: - Helpful in debugging the -39 errors. - -- Another transport layer fix for bogus -39 (LIBSSH2_ERROR_BAD_USE) errors + /* The first key exchange has been performed, - Commit 683aa0f6b52fb1014873c961709102b5006372fc made send_existing() send - more than just the second part of a packet when the kernel did not accept - the full packet, but the function still overlooked the SSH protocol - overhead in each packet, often 48 bytes. + switch to active crypt/comp/mac mode */ - If only the last few bytes of a packet remained, then the packet would - erroneously be considered completely sent, and the next call to write - more data in the session would return a -39 error. - -Daniel Stenberg (6 Dec 2009) -- move local variable to be more localized + There, after NEWKEYS is received, the cipher and mac algorithms + are initialized, and that is where the compression should be + initialized as well. 
+ + The current implementation fails if server->client compression is + enabled because most server implementations follow OpenSSH's + lead, where compression is initialized after NEWKEYS. Since the + server initializes compression after NEWKEYS, but libssh2 + initializes compression after KEXINIT (i.e. before NEWKEYS), they + are out of sync. + + Reported in bug report #180 -- fixed some indent mistakes +- [TJ Saunders brought this change] -Peter Stuge (6 Dec 2009) -- Fix padding in ssh-dss signature blob encoding - - DSA signatures consist of two 160-bit integers called r and s. In ssh-dss - signature blobs r and s are stored directly after each other in binary - representation, making up a 320-bit (40 byte) string. (See RFC4253 p14.) + userauth_hostbased_fromfile: packet length too short - The crypto wrappers in libssh2 would either pack r and s incorrectly, or - fail, when at least one integer was small enough to be stored in 19 bytes - or less. + The packet length calculated in src/userauth.c's + userauth_hostbased_fromfile() function is too short by 4 bytes; + it forgets to add four bytes for the length of the hostname. + This causes hostbased authentication to fail, since the server + will read junk data. - The patch ensures that r and s are always stored as two 160 bit numbers. + verified against proftpd's mod_sftp module -- Don't always clear write direction blocking flag +- _libssh2_userauth_publickey: reject method names longer than the data - When libssh2_transport_write() is called to continue sending a - partially sent packet the write direction flag must not be cleared - until the previous packet has been completely sent, or the app would - hang if the packet still isn't sent completely, since select() gets - called by the internal blocking emulation layer in libssh2 but would - then not be watching the socket for writability. 
+ This functions get the method length by looking at the first 32 + bit of data, and I now made it not accept method lengths that are + longer than the whole data set is, as given in the dedicated + function argument. - Clear the flag only once processing of previous packet data is - complete and a new packet is about to be prepared. - -Alexander Lamaison (24 Nov 2009) -- Detabify. - -- [Daniel Stenberg brought this change] - - Fixed memory leak in sftp_fstat(). - -Simon Josefsson (17 Nov 2009) -- Mark date of 1.2.2 release. - -- Merge branch 'master' of ssh://git.stuge.se/var/lib/git/libssh2 - -Version 1.2.2 (16 Nov 2009) - -Daniel Stenberg (16 Nov 2009) -- prepared for 1.2.2 - -Simon Josefsson (16 Nov 2009) -- Improve NEWS items. - -- Support AES-Counter ciphers. + This was detected when the function was given bogus public key + data as an ascii string, which caused the first 32bits to create + a HUGE number. -- Silence compiler warning. +- NULL resistance: make more public functions survive NULL pointer input - Reported by Steven Van Ingelgem - in . - -- Mention libssh2-style.el. + Sending in NULL as the primary pointer is now dealt with by more + public functions. I also narrowed the userauth.c code somewhat to + stay within 80 columns better. -- Use memmove instead of memcpy on overlapping memory areas. +- agent: make libssh2_agent_userauth() work blocking properly - Reported by Bob Alexander in - . - -- Add. + previously it would always work in a non-blocking manner -- Protect against crash on too small SSH_MSG_IGNORE packets. +Peter Stuge (17 Jun 2010) +- Fix underscore typo for 64-bit printf format specifiers on Windows - Reported by Bob Alexander - in . + Commit 49ddf447ff4bd80285f926eac0115f4e595f9425 was missing underscores. 
-- add copyright line +Daniel Stenberg (16 Jun 2010) +- libssh2_session_callback_set: extended the man page diff --git a/vendor/libssh2/RELEASE-NOTES b/vendor/libssh2/RELEASE-NOTES index 5b78ede381..62064a9fe6 100644 --- a/vendor/libssh2/RELEASE-NOTES +++ b/vendor/libssh2/RELEASE-NOTES @@ -1,31 +1,62 @@ -libssh2 1.8.0 +libssh2 1.10 -This release includes the following changes: +This release includes the following enhancements and bugfixes: - o added a basic dockerised test suite - o crypto: add support for the mbedTLS backend + o adds agent forwarding support + o adds OpenSSH Agent support on Windows + o adds ECDSA key support using the Mbed TLS backend + o adds ECDSA cert authentication + o adds diffie-hellman-group14-sha256, diffie-hellman-group16-sha512, + diffie-hellman-group18-sha512 key exchanges + o adds support for PKIX key reading when using ed25519 with OpenSSL + o adds support for EWOULDBLOCK on VMS systems + o adds support for building with OpenSSL 3 + o adds support for using FIPS mode in OpenSSL + o adds debug symbols when building with MSVC + o adds support for building on the 3DS + o adds unicode build support on Windows + o restores os400 building + o increases min, max and opt Diffie Hellman group values + o improves portiablity of the make file + o improves timeout behavior with 2FA keyboard auth + o various improvements to the Wincng backend + o fixes reading parital packet replies when using an agent + o fixes Diffie Hellman key exchange on Windows 1903+ builds + o fixes building tests with older versions of OpenSSL + o fixes possible multiple definition warnings + o fixes potential cast issues _libssh2_ecdsa_key_get_curve_type() + o fixes potential use after free if libssh2_init() is called twice + o improved linking when using Mbed TLS + o fixes call to libssh2_crypto_exit() if crypto hasn't been initialized + o fixes crash when loading public keys with no id + o fixes possible out of bounds read when exchanging keys + o fixes possible out of 
bounds read when reading packets + o fixes possible out of bounds read when opening an X11 connection + o fixes possible out of bounds read when ecdh host keys + o fixes possible hang when trying to read a disconnected socket + o fixes a crash when using the delayed compression option + o fixes read error with large known host entries + o fixes various warnings + o fixes various small memory leaks + o improved error handling, various detailed errors will now be reported + o builds are now using OSS-Fuzz + o builds now use autoreconf instead of a custom build script + o cmake now respects install directory + o improved CI backend + o updated HACKING-CRYPTO documentation + o use markdown file extensions + o improved unit tests -This release includes the following bugfixes: - - o libgcrypt: fixed a NULL pointer dereference on OOM - o VMS: can't use %zd for off_t format - o VMS: update vms/libssh2_config.h - o windows: link with crypt32.lib - o libssh2_channel_open: speeling error fixed in channel error message - o msvc: fixed 14 compilation warnings - o tests: HAVE_NETINET_IN_H was not defined correctly - o openssl: add OpenSSL 1.1.0 compatibility - o cmake: Add CLEAR_MEMORY option, analogously to that for autoconf - o configure: make the --with-* options override the OpenSSL default - o libssh2_wait_socket: set err_msg on errors - o libssh2_wait_socket: Fix comparison with api_timeout to use milliseconds - This release would not have looked like this without help, code, reports and advice from friends like these: - Alexander Lamaison, Antenore Gatta, Brad Harder, Charles Collicutt, - Craig A. 
Berry, Dan Fandrich, Daniel Stenberg, Kamil Dudka, Keno Fischer, - Taylor Holberton, Viktor Szakats, Will Cosgrove, Zenju - (12 contributors) + katzer, Orgad Shaneh, mark-i-m, Zenju, axjowa, Thilo Schulz, + Etienne Samson, hlefebvre, seba30, Panos, jethrogb, Fabrice Fontaine, + Will Cosgrove, Daniel Stenberg, Michael Buckley, Wallace Souza Silva, + Romain-Geissler-1A, meierha, Tseng Jun, Thomas Klausner, Brendan Shanks, + Harry Sintonen, monnerat, Koutheir Attouchi, Marc Hörsken, yann-morin-1998, + Wez Furlong, TDi-jonesds, David Benjamin, Max Dymond, Igor Klevanets, + Viktor Szakats, Laurent Stacul, Mstrodl, Gabriel Smith, MarcT512, + Paul Capron, teottin, Tor Erik Ottinsen, Brian Inglis - Thanks! (and sorry if I forgot to mention someone) + (40 contributors) diff --git a/vendor/libssh2/acinclude.m4 b/vendor/libssh2/acinclude.m4 index 734ef070cf..2066f0ec9b 100644 --- a/vendor/libssh2/acinclude.m4 +++ b/vendor/libssh2/acinclude.m4 @@ -382,86 +382,131 @@ AC_DEFUN([CURL_CONFIGURE_REENTRANT], [ # ]) -AC_DEFUN([LIBSSH2_CHECKFOR_MBEDTLS], [ - - old_LDFLAGS=$LDFLAGS - old_CFLAGS=$CFLAGS - if test -n "$use_mbedtls" && test "$use_mbedtls" != "no"; then - LDFLAGS="$LDFLAGS -L$use_mbedtls/lib" - CFLAGS="$CFLAGS -I$use_mbedtls/include" - fi +dnl LIBSSH2_LIB_HAVE_LINKFLAGS +dnl -------------------------- +dnl Wrapper around AC_LIB_HAVE_LINKFLAGS to also check $prefix/lib, if set. +dnl +dnl autoconf only checks $prefix/lib64 if gcc -print-search-dirs output +dnl includes a directory named lib64. So, to find libraries in $prefix/lib +dnl we append -L$prefix/lib to LDFLAGS before checking. +dnl +dnl For conveniece, $4 is expanded if [lib]$1 is found. 
- AC_LIB_HAVE_LINKFLAGS([mbedtls], [], [ - #include - ]) +AC_DEFUN([LIBSSH2_LIB_HAVE_LINKFLAGS], [ + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" - if test "$ac_cv_libmbedtls" = "yes"; then - AC_DEFINE(LIBSSH2_MBEDTLS, 1, [Use mbedtls]) - LIBSREQUIRED= # mbedtls doesn't provide a .pc file - LIBS="$LIBS -lmbedtls -lmbedcrypto" - found_crypto=libmbedtls - support_clear_memory=yes - else - # restore - LDFLAGS=$old_LDFLAGS - CFLAGS=$old_CFLAGS + if test "${with_lib$1_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_lib$1_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_lib$1_prefix}/lib" fi -]) -AC_DEFUN([LIBSSH2_CHECKFOR_GCRYPT], [ + AC_LIB_HAVE_LINKFLAGS([$1], [$2], [$3]) - old_LDFLAGS=$LDFLAGS - old_CFLAGS=$CFLAGS - if test -n "$use_libgcrypt" && test "$use_libgcrypt" != "no"; then - LDFLAGS="$LDFLAGS -L$use_libgcrypt/lib" - CFLAGS="$CFLAGS -I$use_libgcrypt/include" - fi - AC_LIB_HAVE_LINKFLAGS([gcrypt], [], [ - #include - ]) + LDFLAGS="$libssh2_save_LDFLAGS" - if test "$ac_cv_libgcrypt" = "yes"; then - AC_DEFINE(LIBSSH2_LIBGCRYPT, 1, [Use libgcrypt]) - LIBSREQUIRED= # libgcrypt doesn't provide a .pc file. sad face. - LIBS="$LIBS -lgcrypt" - found_crypto=libgcrypt + if test "$ac_cv_lib$1" = "yes"; then : + $4 else - # restore - LDFLAGS=$old_LDFLAGS - CFLAGS=$old_CFLAGS + CPPFLAGS="$libssh2_save_CPPFLAGS" fi ]) +AC_DEFUN([LIBSSH2_CHECK_CRYPTO], [ +if test "$use_crypto" = "auto" && test "$found_crypto" = "none" || test "$use_crypto" = "$1"; then +m4_case([$1], +[openssl], [ + LIBSSH2_LIB_HAVE_LINKFLAGS([ssl], [crypto], [#include ], [ + AC_DEFINE(LIBSSH2_OPENSSL, 1, [Use $1]) + LIBSREQUIRED="$LIBSREQUIRED${LIBSREQUIRED:+ }libssl libcrypto" + + # Not all OpenSSL have AES-CTR functions. 
+ libssh2_save_LIBS="$LIBS" + LIBS="$LIBS $LIBSSL" + AC_CHECK_FUNCS(EVP_aes_128_ctr) + LIBS="$libssh2_save_LIBS" + + found_crypto="$1" + found_crypto_str="OpenSSL (AES-CTR: ${ac_cv_func_EVP_aes_128_ctr:-N/A})" + ]) +], + +[libgcrypt], [ + LIBSSH2_LIB_HAVE_LINKFLAGS([gcrypt], [], [#include ], [ + AC_DEFINE(LIBSSH2_LIBGCRYPT, 1, [Use $1]) + found_crypto="$1" + ]) +], -AC_DEFUN([LIBSSH2_CHECKFOR_WINCNG], [ +[mbedtls], [ + LIBSSH2_LIB_HAVE_LINKFLAGS([mbedcrypto], [], [#include ], [ + AC_DEFINE(LIBSSH2_MBEDTLS, 1, [Use $1]) + LIBS="$LIBS -lmbedcrypto" + found_crypto="$1" + support_clear_memory=yes + ]) +], +[wincng], [ # Look for Windows Cryptography API: Next Generation - AC_LIB_HAVE_LINKFLAGS([bcrypt], [], [ - #include - #include - ]) - AC_LIB_HAVE_LINKFLAGS([crypt32], [], [ + AC_CHECK_HEADERS([ntdef.h ntstatus.h], [], [], [#include ]) + AC_CHECK_DECLS([SecureZeroMemory], [], [], [#include ]) + + LIBSSH2_LIB_HAVE_LINKFLAGS([crypt32], [], [ #include #include ]) - AC_CHECK_HEADERS([ntdef.h ntstatus.h], [], [], [ - #include - ]) - AC_CHECK_DECLS([SecureZeroMemory], [], [], [ + LIBSSH2_LIB_HAVE_LINKFLAGS([bcrypt], [], [ #include + #include + ], [ + AC_DEFINE(LIBSSH2_WINCNG, 1, [Use $1]) + found_crypto="$1" + found_crypto_str="Windows Cryptography API: Next Generation" + support_clear_memory="$ac_cv_have_decl_SecureZeroMemory" ]) +], +) + test "$found_crypto" = "none" && + crypto_errors="${crypto_errors}No $1 crypto library found! +" +fi +]) - if test "$ac_cv_libbcrypt" = "yes"; then - AC_DEFINE(LIBSSH2_WINCNG, 1, [Use Windows CNG]) - LIBSREQUIRED= # wincng doesn't provide a .pc file. sad face. 
- LIBS="$LIBS -lbcrypt" - if test "$ac_cv_libcrypt32" = "yes"; then - LIBS="$LIBS -lcrypt32" - fi - found_crypto="Windows Cryptography API: Next Generation" - if test "$ac_cv_have_decl_SecureZeroMemory" = "yes"; then - support_clear_memory=yes - fi + +dnl LIBSSH2_CHECK_OPTION_WERROR +dnl ------------------------------------------------- +dnl Verify if configure has been invoked with option +dnl --enable-werror or --disable-werror, and set +dnl shell variable want_werror as appropriate. + +AC_DEFUN([LIBSSH2_CHECK_OPTION_WERROR], [ + AC_BEFORE([$0],[LIBSSH2_CHECK_COMPILER])dnl + AC_MSG_CHECKING([whether to enable compiler warnings as errors]) + OPT_COMPILER_WERROR="default" + AC_ARG_ENABLE(werror, +AC_HELP_STRING([--enable-werror],[Enable compiler warnings as errors]) +AC_HELP_STRING([--disable-werror],[Disable compiler warnings as errors]), + OPT_COMPILER_WERROR=$enableval) + case "$OPT_COMPILER_WERROR" in + no) + dnl --disable-werror option used + want_werror="no" + ;; + default) + dnl configure option not specified + want_werror="no" + ;; + *) + dnl --enable-werror option used + want_werror="yes" + ;; + esac + AC_MSG_RESULT([$want_werror]) + + if test X"$want_werror" = Xyes; then + CFLAGS="$CFLAGS -Werror" fi ]) + diff --git a/vendor/libssh2/aclocal.m4 b/vendor/libssh2/aclocal.m4 index 41ad8c694f..fc56a693e4 100644 --- a/vendor/libssh2/aclocal.m4 +++ b/vendor/libssh2/aclocal.m4 @@ -1,6 +1,6 @@ -# generated automatically by aclocal 1.15 -*- Autoconf -*- +# generated automatically by aclocal 1.16.4 -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2021 Free Software Foundation, Inc. 
# This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -14,13 +14,13 @@ m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, -[m4_warning([this file was generated for autoconf 2.69. +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, +[m4_warning([this file was generated for autoconf 2.71. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) -# Copyright (C) 2002-2014 Free Software Foundation, Inc. +# Copyright (C) 2002-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.]) # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], -[am__api_version='1.15' +[am__api_version='1.16' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. -m4_if([$1], [1.15], [], +m4_if([$1], [1.16.4], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) @@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], []) # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.15])dnl +[AM_AUTOMAKE_VERSION([1.16.4])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -110,7 +110,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd` # AM_CONDITIONAL -*- Autoconf -*- -# Copyright (C) 1997-2014 Free Software Foundation, Inc. +# Copyright (C) 1997-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -141,7 +141,7 @@ AC_CONFIG_COMMANDS_PRE( Usually this means the macro was only invoked conditionally.]]) fi])]) -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -332,13 +332,12 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl # Generate code to set up dependency tracking. -*- Autoconf -*- -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. - # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], @@ -346,49 +345,43 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. 
- case $CONFIG_FILES in - *\'*) eval set x "$CONFIG_FILES" ;; - *) set x $CONFIG_FILES ;; - esac + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + AS_CASE([$CONFIG_FILES], + [*\'*], [eval set x "$CONFIG_FILES"], + [*], [set x $CONFIG_FILES]) shift - for mf + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf do # Strip MF so we end up with the name of the file. - mf=`echo "$mf" | sed -e 's/:.*$//'` - # Check whether this is an Automake generated Makefile or not. - # We used to match only the files named 'Makefile.in', but - # some people rename them; so instead we look at the file content. - # Grep'ing the first line is not enough: some people post-process - # each Makefile.in and add a new line on top of each file to say so. - # Grep'ing the whole file is not good either: AIX grep has a line + am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. - if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then - dirpart=`AS_DIRNAME("$mf")` - else - continue - fi - # Extract the definition of DEPDIR, am__include, and am__quote - # from the Makefile without running 'make'. - DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` - test -z "$DEPDIR" && continue - am__include=`sed -n 's/^am__include = //p' < "$mf"` - test -z "$am__include" && continue - am__quote=`sed -n 's/^am__quote = //p' < "$mf"` - # Find all dependency output files, they are included files with - # $(DEPDIR) in their names. We invoke sed twice because it is the - # simplest approach to changing $(DEPDIR) to its actual value in the - # expansion. 
- for file in `sed -n " - s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ - sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do - # Make sure the directory exists. - test -f "$dirpart/$file" && continue - fdir=`AS_DIRNAME(["$file"])` - AS_MKDIR_P([$dirpart/$fdir]) - # echo "creating $dirpart/$file" - echo '# dummy' > "$dirpart/$file" - done + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`AS_DIRNAME(["$am_mf"])` + am_filepart=`AS_BASENAME(["$am_mf"])` + AM_RUN_LOG([cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles]) || am_rc=$? done + if test $am_rc -ne 0; then + AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE="gmake" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking).]) + fi + AS_UNSET([am_dirpart]) + AS_UNSET([am_filepart]) + AS_UNSET([am_mf]) + AS_UNSET([am_rc]) + rm -f conftest-deps.mk } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS @@ -397,18 +390,17 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # -# This code is only required when automatic dependency tracking -# is enabled. FIXME. This creates each '.P' file that we will -# need in order to bootstrap the dependency handling code. +# This code is only required when automatic dependency tracking is enabled. +# This creates each '.Po' and '.Plo' makefile fragment that we'll need in +# order to bootstrap the dependency handling code. 
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], - [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) -]) + [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) # Do all the work for Automake. -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -472,7 +464,7 @@ m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( - m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), + m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl @@ -495,8 +487,8 @@ AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: -# -# +# +# AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. @@ -524,6 +516,20 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi +AC_SUBST([CTAGS]) +if test -z "$ETAGS"; then + ETAGS=etags +fi +AC_SUBST([ETAGS]) +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi +AC_SUBST([CSCOPE]) + AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. 
This @@ -563,7 +569,7 @@ END Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: . +that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM @@ -605,7 +611,7 @@ for _am_header in $config_headers :; do done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -626,7 +632,7 @@ if test x"${install_sh+set}" != xset; then fi AC_SUBST([install_sh])]) -# Copyright (C) 2003-2014 Free Software Foundation, Inc. +# Copyright (C) 2003-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -648,7 +654,7 @@ AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -683,7 +689,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) # Check to see how 'make' treats includes. -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -691,49 +697,42 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) # AM_MAKE_INCLUDE() # ----------------- -# Check to see how make treats includes. +# Check whether make has an 'include' directive that can support all +# the idioms we need for our automatic dependency tracking code. AC_DEFUN([AM_MAKE_INCLUDE], -[am_make=${MAKE-make} -cat > confinc << 'END' +[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) +cat > confinc.mk << 'END' am__doit: - @echo this is the am__doit target + @echo this is the am__doit target >confinc.out .PHONY: am__doit END -# If we don't find an include directive, just comment out the code. -AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= -_am_result=none -# First try GNU make style include. -echo "include confinc" > confmf -# Ignore all kinds of additional output from 'make'. -case `$am_make -s -f confmf 2> /dev/null` in #( -*the\ am__doit\ target*) - am__include=include - am__quote= - _am_result=GNU - ;; -esac -# Now try BSD make style include. -if test "$am__include" = "#"; then - echo '.include "confinc"' > confmf - case `$am_make -s -f confmf 2> /dev/null` in #( - *the\ am__doit\ target*) - am__include=.include - am__quote="\"" - _am_result=BSD - ;; - esac -fi -AC_SUBST([am__include]) -AC_SUBST([am__quote]) -AC_MSG_RESULT([$_am_result]) -rm -f confinc confmf -]) +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. 
+echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) + AS_CASE([$?:`cat confinc.out 2>/dev/null`], + ['0:this is the am__doit target'], + [AS_CASE([$s], + [BSD], [am__include='.include' am__quote='"'], + [am__include='include' am__quote=''])]) + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +AC_MSG_RESULT([${_am_result}]) +AC_SUBST([am__include])]) +AC_SUBST([am__quote])]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- -# Copyright (C) 1997-2014 Free Software Foundation, Inc. +# Copyright (C) 1997-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -754,12 +753,7 @@ AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac + MISSING="\${SHELL} '$am_aux_dir/missing'" fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then @@ -772,7 +766,7 @@ fi # Helper functions for option handling. -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -801,7 +795,7 @@ AC_DEFUN([_AM_SET_OPTIONS], AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2021 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -848,7 +842,7 @@ AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -867,7 +861,7 @@ AC_DEFUN([AM_RUN_LOG], # Check to make sure that the build environment is sane. -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -948,7 +942,7 @@ AC_CONFIG_COMMANDS_PRE( rm -f conftest.file ]) -# Copyright (C) 2009-2014 Free Software Foundation, Inc. +# Copyright (C) 2009-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1008,7 +1002,7 @@ AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1036,7 +1030,7 @@ fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) -# Copyright (C) 2006-2014 Free Software Foundation, Inc. +# Copyright (C) 2006-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1055,7 +1049,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- -# Copyright (C) 2004-2014 Free Software Foundation, Inc. 
+# Copyright (C) 2004-2021 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, diff --git a/vendor/libssh2/buildconf b/vendor/libssh2/buildconf deleted file mode 100755 index 558dcb660f..0000000000 --- a/vendor/libssh2/buildconf +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -LIBTOOLIZE="libtoolize" - -if [ "x`which $LIBTOOLIZE`" = "x" ]; then - LIBTOOLIZE="glibtoolize" -fi - -if [ "x`which $LIBTOOLIZE`" = "x" ]; then - echo "Neither libtoolize nor glibtoolize could be found!" - exit 1 -fi - -${LIBTOOLIZE} --copy --automake --force -${ACLOCAL:-aclocal} -I m4 $ACLOCAL_FLAGS -${AUTOHEADER:-autoheader} -# copy the private libssh2_config.h.in to the examples dir so that -# it can be included without pointing the include path to the private -# source dir -cp src/libssh2_config.h.in example/libssh2_config.h.in -${AUTOCONF:-autoconf} -${AUTOMAKE:-automake} --add-missing --copy diff --git a/vendor/libssh2/compile b/vendor/libssh2/compile index a85b723c7e..23fcba0113 100755 --- a/vendor/libssh2/compile +++ b/vendor/libssh2/compile @@ -1,9 +1,9 @@ #! /bin/sh # Wrapper for compilers which do not understand '-c -o'. -scriptversion=2012-10-14.11; # UTC +scriptversion=2018-03-07.03; # UTC -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2020 Free Software Foundation, Inc. # Written by Tom Tromey . # # This program is free software; you can redistribute it and/or modify @@ -17,7 +17,7 @@ scriptversion=2012-10-14.11; # UTC # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -53,7 +53,7 @@ func_file_conv () MINGW*) file_conv=mingw ;; - CYGWIN*) + CYGWIN* | MSYS*) file_conv=cygwin ;; *) @@ -67,7 +67,7 @@ func_file_conv () mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; - cygwin/*) + cygwin/* | msys/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) @@ -255,7 +255,8 @@ EOF echo "compile $scriptversion" exit $? ;; - cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) func_cl_wrapper "$@" # Doesn't return... ;; esac @@ -339,9 +340,9 @@ exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" +# time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: diff --git a/vendor/libssh2/config.guess b/vendor/libssh2/config.guess index d622a44e55..f50dcdb6de 100755 --- a/vendor/libssh2/config.guess +++ b/vendor/libssh2/config.guess @@ -1,14 +1,12 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2012-02-10' +timestamp='2018-02-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, but @@ -17,24 +15,22 @@ timestamp='2012-02-10' # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Originally written by Per Bothner. Please send patches (context -# diff format) to and include a ChangeLog -# entry. +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). # -# This script attempts to guess a canonical system name similar to -# config.sub. If it succeeds, it prints the system name on stdout, and -# exits with 0. Otherwise, it exits with 1. +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + me=`echo "$0" | sed -e 's,.*/,,'` @@ -43,7 +39,7 @@ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -54,9 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. 
+Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -113,9 +107,9 @@ trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; + ,,) echo "int x;" > "$dummy.c" ; for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + if ($c -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; @@ -138,9 +132,37 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval "$set_cc_for_build" + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + + # If ldd exists, use it to detect musl libc. + if command -v ldd >/dev/null && \ + ldd --version 2>&1 | grep -q ^musl + then + LIBC=musl + fi + ;; +esac + # Note: order is significant - the case branches are not exclusive. -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -153,21 +175,31 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # Note: NetBSD doesn't particularly care about the vendor # portion of the name. 
We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)` + case "$UNAME_MACHINE_ARCH" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + earmv*) + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build + eval "$set_cc_for_build" if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then @@ -182,40 +214,67 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in os=netbsd ;; esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` + ;; + esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in + case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. 
-f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" + echo "$machine-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) @@ -232,63 +291,54 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; 
"EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; + UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; + UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; + UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; + UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; + UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; + UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; + UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; + UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; + UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? 
- echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos + echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos + echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition @@ -300,9 +350,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} + echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; - arm:riscos:*:*|arm:RISCOS:*:*) + arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) @@ -327,38 +377,38 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} + echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" + eval "$set_cc_for_build" + SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. 
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then - SUN_ARCH="x86_64" + SUN_ARCH=x86_64 fi fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in @@ -367,25 +417,25 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" exit ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) - echo sparc-sun-sunos${UNAME_RELEASE} + echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} + echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. 
The machine name # can be virtually everything (everything which is not @@ -396,44 +446,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} + echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} + echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} + echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} + echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} + echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} + echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} + echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} + echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { @@ -442,23 +492,23 @@ 
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} + echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax @@ -484,17 +534,17 @@ EOF AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] + if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ + [ "$TARGET_BINARY_INTERFACE"x = x ] then - echo m88k-dg-dgux${UNAME_RELEASE} + echo m88k-dg-dgux"$UNAME_RELEASE" else - echo m88k-dg-dguxbcs${UNAME_RELEASE} + echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else - echo i586-dg-dgux${UNAME_RELEASE} + echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) @@ -511,7 +561,7 @@ EOF echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + echo 
mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id @@ -523,14 +573,14 @@ EOF if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #include main() @@ -541,7 +591,7 @@ EOF exit(0); } EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then echo "$SYSTEM_NAME" else @@ -555,26 +605,27 @@ EOF exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx @@ -589,28 +640,28 @@ EOF echo m68k-hp-bsd4.4 exit ;; 
9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + case "$UNAME_MACHINE" in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + case "$sc_cpu_version" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + case "$sc_kernel_bits" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + if [ "$HP_ARCH" = "" ]; then + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include @@ -643,13 +694,13 @@ EOF exit (0); } EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ ${HP_ARCH} = "hppa2.0w" ] + if [ "$HP_ARCH" = hppa2.0w ] then - eval $set_cc_for_build + eval "$set_cc_for_build" # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. 
hppa64-hp-hpux* has the same kernel and a compiler @@ -660,23 +711,23 @@ EOF # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then - HP_ARCH="hppa2.0w" + HP_ARCH=hppa2.0w else - HP_ARCH="hppa64" + HP_ARCH=hppa64 fi fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #include int main () @@ -701,11 +752,11 @@ EOF exit (0); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) @@ -714,7 +765,7 @@ EOF *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) @@ -722,9 +773,9 @@ EOF exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk + echo "$UNAME_MACHINE"-unknown-osf1mk else - echo ${UNAME_MACHINE}-unknown-osf1 + echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) @@ -749,124 +800,109 @@ EOF echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + echo 
"$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | 
*:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} + echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in + case "$UNAME_PROCESSOR" in amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin + echo "$UNAME_MACHINE"-pc-cygwin exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 + *:MINGW64*:*) + echo "$UNAME_MACHINE"-pc-mingw64 exit ;; - i*:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys + *:MINGW*:*) + echo "$UNAME_MACHINE"-pc-mingw32 exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. 
- echo ${UNAME_MACHINE}-mingw32 + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 + echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) - case ${UNAME_MACHINE} in + case "$UNAME_MACHINE" in x86) - echo i586-pc-interix${UNAME_RELEASE} + echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} + echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) - echo ia64-unknown-interix${UNAME_RELEASE} + echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin + echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; *:GNU:*:*) # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 
's/[-(].*//'`-$LIBC" exit ;; i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix + echo "$UNAME_MACHINE"-pc-minix exit ;; aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in @@ -879,63 +915,64 @@ EOF EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi - echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + if test "$?" = 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) - eval $set_cc_for_build + eval "$set_cc_for_build" if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo ${UNAME_MACHINE}-unknown-linux-gnueabi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else - echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + 
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) - LIBC=gnu - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #ifdef __dietlibc__ - LIBC=dietlibc - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el @@ -949,55 +986,74 @@ EOF #endif #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" + test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } ;; - or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) - echo sparc-unknown-linux-gnu + echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-gnu + echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-gnu ;; - PA8*) echo hppa2.0-unknown-linux-gnu ;; - *) echo 
hppa-unknown-linux-gnu ;; + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-gnu + echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-gnu + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-gnu + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + if objdump -f /bin/sh | grep -q elf32-x86-64; then + echo "$UNAME_MACHINE"-pc-linux-"$LIBC"x32 + else + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + fi exit ;; xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. @@ -1011,34 +1067,34 @@ EOF # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. 
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx + echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop + echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos + echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable + echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} + echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp + echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) @@ -1048,12 +1104,12 @@ EOF *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 @@ -1063,9 +1119,9 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + echo 
"$UNAME_MACHINE"-pc-sco"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv32 + echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) @@ -1073,7 +1129,7 @@ EOF # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; @@ -1085,9 +1141,9 @@ EOF exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. - echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) @@ -1107,9 +1163,9 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; @@ -1118,28 +1174,28 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname 
-p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} + echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} + echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} + echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} + echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} + echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 @@ -1150,7 +1206,7 @@ EOF *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 + echo "$UNAME_MACHINE"-sni-sysv4 else echo ns32k-sni-sysv fi @@ -1170,23 +1226,23 @@ EOF exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos + echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} + echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} + echo mips-nec-sysv"$UNAME_RELEASE" else - echo mips-unknown-sysv${UNAME_RELEASE} + echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. @@ -1201,66 +1257,97 @@ EOF BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} + echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} + echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} + echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} + echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} + echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} + echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - case $UNAME_PROCESSOR in - i386) - eval $set_cc_for_build - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - UNAME_PROCESSOR="x86_64" - fi - fi ;; - unknown) UNAME_PROCESSOR=powerpc ;; - esac - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + eval "$set_cc_for_build" + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) 
UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then + if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk"$UNAME_RELEASE" exit ;; - NSE-?:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux @@ -1269,18 +1356,18 @@ EOF echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not 
consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. - if test "$cputype" = "386"; then + if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi - echo ${UNAME_MACHINE}-unknown-plan9 + echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 @@ -1301,14 +1388,14 @@ EOF echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} + echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in + case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; @@ -1317,185 +1404,48 @@ EOF echo i386-pc-xenix exit ;; i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" exit ;; i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos + echo "$UNAME_MACHINE"-pc-rdos exit ;; i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros + echo "$UNAME_MACHINE"-pc-aros exit ;; x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs exit ;; esac -#echo '(No uname command or uname output not recognized.)' 1>&2 -#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 - -eval $set_cc_for_build -cat >$dummy.c < -# include -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, - I don't know.... 
*/ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif +echo "$0: unable to guess system type" >&2 -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif - -#if defined (NeXT) -#if !defined (__ARCHITECTURE__) -#define __ARCHITECTURE__ "m68k" -#endif - int version; - version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 < -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} +NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize +the system type. 
Please install a C compiler and try again. EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi + ;; +esac cat >&2 < in order to provide the needed -information to handle your system. +If $0 has already been updated, send the following data and any +information you think might be pertinent to config-patches@gnu.org to +provide the necessary information to handle your system. config.guess timestamp = $timestamp @@ -1514,16 +1464,16 @@ hostinfo = `(hostinfo) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" EOF exit 1 # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'write-file-functions 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/vendor/libssh2/config.sub b/vendor/libssh2/config.sub index c894da4550..1d8e98bcee 100755 --- a/vendor/libssh2/config.sub +++ b/vendor/libssh2/config.sub @@ -1,36 +1,31 @@ #! /bin/sh # Configuration validation subroutine script. 
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2012-02-10' +timestamp='2018-02-22' -# This file is (in principle) common to ALL GNU software. -# The presence of a machine in this file suggests that SOME GNU software -# can handle that machine. It does not imply ALL GNU software can. -# -# This file is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. +# the same distribution terms that you use for the rest of that +# program. 
This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches to . Submit a context -# diff and a properly formatted GNU ChangeLog entry. +# Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -38,7 +33,7 @@ timestamp='2012-02-10' # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases @@ -58,12 +53,11 @@ timestamp='2012-02-10' me=`echo "$0" | sed -e 's,.*/,,'` usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -73,9 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. +Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -102,7 +94,7 @@ while test $# -gt 0 ; do *local*) # First pass through any local machine types. - echo $1 + echo "$1" exit ;; * ) @@ -120,24 +112,24 @@ esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. 
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +maybe_os=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + basic_machine=`echo "$1" | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` + basic_machine=`echo "$1" | sed 's/-[^-]*$//'` + if [ "$basic_machine" != "$1" ] + then os=`echo "$1" | sed 's/.*-/-/'` else os=; fi ;; esac @@ -156,7 +148,7 @@ case $os in -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze) + -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; @@ -186,53 +178,56 @@ case $os in ;; -sco6) os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` 
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` + basic_machine=`echo "$1" | sed -e 's/86-.*/86-sequent/'` ;; -psos*) os=-psos @@ -253,21 +248,25 @@ case $basic_machine in | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ - | be32 | be64 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ | bfin \ - | c4x | clipper \ + | c4x | c8051 | clipper \ | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ - | i370 | i860 | i960 
| ia64 \ + | i370 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ + | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | mcore | mep | metag \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ @@ -281,26 +280,30 @@ case $basic_machine in | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ - | nios | nios2 \ + | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ - | open8 \ - | or32 \ - | pdp10 | pdp11 | pj | pjl \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ | pyramid \ + | riscv32 | riscv64 \ | rl78 | rx \ | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ @@ -308,7 +311,8 @@ case $basic_machine in | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ + | visium \ + | wasm32 \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown @@ -322,11 +326,14 @@ case $basic_machine in c6x) basic_machine=tic6x-unknown ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | 
m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown os=-none ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65) ;; ms1) basic_machine=mt-unknown @@ -355,7 +362,7 @@ case $basic_machine in ;; # Object if more than one company name word. *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. @@ -364,26 +371,29 @@ case $basic_machine in | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ + | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | clipper-* | craynv-* | cydra-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ + | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ + | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ | ip2k-* | iq2000-* \ + | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ @@ -397,28 +407,34 @@ case $basic_machine in | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | 
mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ + | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ | pyramid-* \ + | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ @@ -426,6 +442,8 @@ case $basic_machine in | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ + | visium-* \ + | wasm32-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ @@ -439,7 +457,7 @@ case $basic_machine in # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 
386bsd) - basic_machine=i386-unknown + basic_machine=i386-pc os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) @@ -473,7 +491,7 @@ case $basic_machine in basic_machine=x86_64-pc ;; amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl @@ -502,6 +520,9 @@ case $basic_machine in basic_machine=i386-pc os=-aros ;; + asmjs) + basic_machine=asmjs-unknown + ;; aux) basic_machine=m68k-apple os=-aux @@ -515,7 +536,7 @@ case $basic_machine in os=-linux ;; blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) @@ -523,13 +544,13 @@ case $basic_machine in os=-cnk ;; c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray @@ -618,10 +639,18 @@ case $basic_machine in basic_machine=rs6000-bull os=-bosx ;; - dpx2* | dpx2*-bull) + dpx2*) basic_machine=m68k-bull os=-sysv3 ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=$os"spe" + ;; ebmon29k) basic_machine=a29k-amd os=-ebmon @@ -711,9 +740,6 @@ case $basic_machine in hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; - hppa-next) - os=-nextstep3 - ;; hppaosf) basic_machine=hppa1.1-hp os=-osf @@ -726,26 +752,26 @@ case $basic_machine in basic_machine=i370-ibm ;; i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` 
os=-sysv32 ;; i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; - i386-vsta | vsta) + vsta) basic_machine=i386-unknown os=-vsta ;; @@ -763,17 +789,17 @@ case $basic_machine in basic_machine=m68k-isi os=-sysv ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` + ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; - m88k-omron*) - basic_machine=m88k-omron - ;; magnum | m3230) basic_machine=mips-mips os=-sysv @@ -782,11 +808,15 @@ case $basic_machine in basic_machine=ns32k-utek os=-sysv ;; - microblaze) + microblaze*) basic_machine=microblaze-xilinx ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; mingw32) - basic_machine=i386-pc + basic_machine=i686-pc os=-mingw32 ;; mingw32ce) @@ -801,10 +831,10 @@ case $basic_machine in os=-mint ;; mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` ;; mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k @@ -814,15 +844,19 @@ case $basic_machine in basic_machine=powerpc-unknown os=-morphos ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` ;; msys) - 
basic_machine=i386-pc + basic_machine=i686-pc os=-msys ;; mvs) @@ -861,7 +895,7 @@ case $basic_machine in basic_machine=v70-nec os=-sysv ;; - next | m*-next ) + next | m*-next) basic_machine=m68k-next case $os in -nextstep* ) @@ -906,6 +940,12 @@ case $basic_machine in nsr-tandem) basic_machine=nsr-tandem ;; + nsv-tandem) + basic_machine=nsv-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki os=-proelf @@ -938,7 +978,7 @@ case $basic_machine in os=-linux ;; parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=-linux ;; pbd) @@ -954,7 +994,7 @@ case $basic_machine in basic_machine=i386-pc ;; pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc @@ -969,16 +1009,16 @@ case $basic_machine in basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould @@ -988,23 +1028,23 @@ case $basic_machine in ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; - ppcle | powerpclittle | 
ppc-le | powerpc-little) + ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) + ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm @@ -1013,7 +1053,11 @@ case $basic_machine in basic_machine=i586-unknown os=-pw32 ;; - rdos) + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) basic_machine=i386-pc os=-rdos ;; @@ -1054,17 +1098,10 @@ case $basic_machine in sequent) basic_machine=i386-sequent ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; sh5el) basic_machine=sh5le-unknown ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) + simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; @@ -1083,7 +1120,7 @@ case $basic_machine in os=-sysv4 ;; strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun @@ -1205,6 +1242,9 @@ case $basic_machine in basic_machine=hppa1.1-winbond os=-proelf ;; + x64) + basic_machine=x86_64-pc + ;; xbox) basic_machine=i686-pc os=-mingw32 @@ -1213,20 +1253,12 @@ case $basic_machine in basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; - z8k-*-coff) - 
basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; none) basic_machine=none-none os=-none @@ -1255,10 +1287,6 @@ case $basic_machine in vax) basic_machine=vax-dec ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; pdp11) basic_machine=pdp11-dec ;; @@ -1268,9 +1296,6 @@ case $basic_machine in sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; cydra) basic_machine=cydra-cydrome ;; @@ -1290,7 +1315,7 @@ case $basic_machine in # Make sure to match an already-canonicalized machine name. ;; *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; esac @@ -1298,10 +1323,10 @@ esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` ;; *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` ;; *) ;; @@ -1312,8 +1337,8 @@ esac if [ x"$os" != x"" ] then case $os in - # First match some system type aliases - # that might get confused with valid system types. + # First match some system type aliases that might get confused + # with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux @@ -1324,45 +1349,48 @@ case $os in -solaris) os=-solaris2 ;; - -svr4*) - os=-sysv4 - ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; - # First accept the basic system types. 
+ # es1800 is here to avoid being matched by es* (a different OS) + -es1800*) + os=-ose + ;; + # Now accept the basic system types. # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. + # Each alternative MUST end in a * to match a version number. # -sysv* is not here because it comes later, after sysvr4. -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* \ + | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -openbsd* | -solidbsd* \ + | -hiux* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ + | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | 
-tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -morphos* | -superux* | -rtmk* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox* | -bme* \ + | -midnightbsd*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) @@ -1379,12 +1407,12 @@ case $os in -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + -sim | -xray | -os68k* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) - os=`echo $os | sed -e 's|mac|macos|'` + os=`echo "$os" | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc @@ -1393,10 +1421,10 @@ case $os in os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` + os=`echo "$os" | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` + os=`echo "$os" | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition @@ -1407,12 +1435,6 @@ case $os in -wince*) os=-wince ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; -utek*) os=-bsd ;; @@ -1437,7 +1459,7 @@ case $os in -nova*) os=-rtmk-nova ;; - -ns2 ) + -ns2) os=-nextstep2 ;; -nsk*) @@ -1459,7 +1481,7 @@ case $os in -oss*) os=-sysv3 ;; - -svr4) + -svr4*) os=-sysv4 ;; -svr3) @@ -1474,35 +1496,38 @@ case $os in -ose*) os=-ose ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; - -aros*) - os=-aros - ;; - -kaos*) - os=-kaos - ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; + -pikeos*) + # Until real need of OS specific support 
for + # particular features comes up, bare metal + # configurations are quite functional. + case $basic_machine in + arm*) + os=-eabi + ;; + *) + os=-elf + ;; + esac + ;; -nacl*) ;; + -ios) + ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 exit 1 ;; esac @@ -1537,6 +1562,12 @@ case $basic_machine in c4x-* | tic4x-*) os=-coff ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; tic54x-*) os=-coff ;; @@ -1586,12 +1617,12 @@ case $basic_machine in sparc-* | *-sun) os=-sunos4.1.1 ;; + pru-*) + os=-elf + ;; *-be) os=-beos ;; - *-haiku) - os=-haiku - ;; *-ibm) os=-aix ;; @@ -1631,7 +1662,7 @@ case $basic_machine in m88k-omron*) os=-luna ;; - *-next ) + *-next) os=-nextstep ;; *-sequent) @@ -1646,9 +1677,6 @@ case $basic_machine in i370-*) os=-mvs ;; - *-next) - os=-nextstep3 - ;; *-gould) os=-sysv ;; @@ -1758,15 +1786,15 @@ case $basic_machine in vendor=stratus ;; esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` ;; esac -echo $basic_machine$os +echo "$basic_machine$os" exit # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'write-file-functions 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/vendor/libssh2/configure b/vendor/libssh2/configure index d891378d06..1cbf782b18 100755 --- a/vendor/libssh2/configure +++ b/vendor/libssh2/configure @@ -1,11 +1,12 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for libssh2 -. +# Generated by GNU Autoconf 2.71 for libssh2 -. # # Report bugs to . # # -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. 
+# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, +# Inc. # # # This configure script is free software; the Free Software Foundation @@ -16,14 +17,16 @@ # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : +as_nop=: +if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST -else +else $as_nop case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( @@ -33,46 +36,46 @@ esac fi + +# Reset variables that may have inherited troublesome values from +# the environment. + +# IFS needs to be set, to space, tab, and newline, in precisely that order. +# (If _AS_PATH_WALK were called with IFS unset, it would have the +# side effect of setting IFS to empty, thus disabling word splitting.) +# Quoting is to prevent editors from complaining about space-tab. as_nl=' ' export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi +IFS=" "" $as_nl" + +PS1='$ ' +PS2='> ' +PS4='+ ' + +# Ensure predictable behavior from utilities with locale-dependent output. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# We cannot yet rely on "unset" to work, but we need these variables +# to be unset--not just set to an empty or harmless value--now, to +# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +# also avoids known problems related to "unset" and subshell syntax +# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +do eval test \${$as_var+y} \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done + +# Ensure that fds 0, 1, and 2 are open. +if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +if (exec 3>&2) ; then :; else exec 2>/dev/null; fi # The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then +if ${PATH_SEPARATOR+false} :; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || @@ -81,13 +84,6 @@ if test "${PATH_SEPARATOR+set}" != set; then fi -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( @@ -96,8 +92,12 @@ case $0 in #(( for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + test -r "$as_dir$0" && as_myself=$as_dir$0 && break done IFS=$as_save_IFS @@ -109,30 +109,10 @@ if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. 
@@ -154,20 +134,22 @@ esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 +printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + as_bourne_compatible="as_nop=: +if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST -else +else \$as_nop case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( @@ -187,18 +169,20 @@ as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : +if ( set x; as_fn_ret_success y && test x = \"\$1\" ) +then : -else +else \$as_nop exitcode=1; echo positional parameters were not saved. 
fi test x\$exitcode = x0 || exit 1 +blah=\$(echo \$(echo blah)) +test x\"\$blah\" = xblah || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' @@ -206,31 +190,40 @@ test \$(( 1 + 1 )) = 2 || exit 1 ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ - || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" - if (eval "$as_required") 2>/dev/null; then : + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null +then : as_have_required=yes -else +else $as_nop as_have_required=no fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null +then : -else +else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
- as_shell=$as_dir/$as_base + as_shell=$as_dir$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null +then : CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null +then : break 2 fi fi @@ -238,14 +231,21 @@ fi esac as_found=false done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } IFS=$as_save_IFS +if $as_found +then : + +else $as_nop + if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null +then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi +fi - if test "x$CONFIG_SHELL" != x; then : + if test "x$CONFIG_SHELL" != x +then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also @@ -263,18 +263,19 @@ esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." 
+ if test x$as_have_required = xno +then : + printf "%s\n" "$0: This script requires a shell more modern than all" + printf "%s\n" "$0: the shells that I found on your system." + if test ${ZSH_VERSION+y} ; then + printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should" + printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later." else - $as_echo "$0: Please tell bug-autoconf@gnu.org and + printf "%s\n" "$0: Please tell bug-autoconf@gnu.org and $0: libssh2-devel@cool.haxx.se about your system, including $0: any error possibly output before this message. Then $0: install a modern shell, or manually run the script @@ -302,6 +303,7 @@ as_fn_unset () } as_unset=as_fn_unset + # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. @@ -319,6 +321,14 @@ as_fn_exit () as_fn_set_status $1 exit $1 } # as_fn_exit +# as_fn_nop +# --------- +# Do nothing but, unlike ":", preserve the value of $?. +as_fn_nop () +{ + return $? +} +as_nop=as_fn_nop # as_fn_mkdir_p # ------------- @@ -333,7 +343,7 @@ as_fn_mkdir_p () as_dirs= while :; do case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" @@ -342,7 +352,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | +printf "%s\n" X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -381,12 +391,13 @@ as_fn_executable_p () # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. 
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +then : eval 'as_fn_append () { eval $1+=\$2 }' -else +else $as_nop as_fn_append () { eval $1=\$$1\$2 @@ -398,18 +409,27 @@ fi # as_fn_append # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +then : eval 'as_fn_arith () { as_val=$(( $* )) }' -else +else $as_nop as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith +# as_fn_nop +# --------- +# Do nothing but, unlike ":", preserve the value of $?. +as_fn_nop () +{ + return $? +} +as_nop=as_fn_nop # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- @@ -421,9 +441,9 @@ as_fn_error () as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi - $as_echo "$as_me: error: $2" >&2 + printf "%s\n" "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error @@ -450,7 +470,7 @@ as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X/"$0" | +printf "%s\n" X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q @@ -494,7 +514,7 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall @@ -508,6 +528,10 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits exit } + +# Determine whether it's possible to make 'echo' print without a newline. +# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +# for compatibility with existing Makefiles. ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) @@ -521,6 +545,13 @@ case `echo -n x` in #((((( ECHO_N='-n';; esac +# For backward compatibility with old third-party macros, we provide +# the shell variables $as_echo and $as_echo_n. New code should use +# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. +as_echo='printf %s\n' +as_echo_n='printf %s' + + rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file @@ -598,40 +629,36 @@ PACKAGE_URL='' ac_unique_file="src" # Factoring default headers for most tests. 
ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include +#include +#ifdef HAVE_STDIO_H +# include #endif -#ifdef STDC_HEADERS +#ifdef HAVE_STDLIB_H # include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif #endif #ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif # include #endif -#ifdef HAVE_STRINGS_H -# include -#endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif #ifdef HAVE_UNISTD_H # include #endif" +ac_header_c_list= ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS @@ -639,43 +666,50 @@ LIBOBJS ALLOCA HAVE_SYS_UN_H_FALSE HAVE_SYS_UN_H_TRUE +USE_OSSFUZZ_STATIC_FALSE +USE_OSSFUZZ_STATIC_TRUE +USE_OSSFUZZ_FLAG_FALSE +USE_OSSFUZZ_FLAG_TRUE +LIB_FUZZING_ENGINE +USE_OSSFUZZERS_FALSE +USE_OSSFUZZERS_TRUE BUILD_EXAMPLES_FALSE BUILD_EXAMPLES_TRUE +CPP LIBSREQUIRED LIBZ_PREFIX LTLIBZ LIBZ HAVE_LIBZ -OS400QC3_FALSE -OS400QC3_TRUE +WINCNG_FALSE +WINCNG_TRUE MBEDTLS_FALSE MBEDTLS_TRUE LIBGCRYPT_FALSE LIBGCRYPT_TRUE -WINCNG_FALSE -WINCNG_TRUE OPENSSL_FALSE OPENSSL_TRUE -LIBSSL_PREFIX -LTLIBSSL -LIBSSL -HAVE_LIBSSL -LIBMBEDTLS_PREFIX -LTLIBMBEDTLS -LIBMBEDTLS -HAVE_LIBMBEDTLS -LIBCRYPT32_PREFIX -LTLIBCRYPT32 -LIBCRYPT32 -HAVE_LIBCRYPT32 LIBBCRYPT_PREFIX LTLIBBCRYPT LIBBCRYPT HAVE_LIBBCRYPT +LIBCRYPT32_PREFIX +LTLIBCRYPT32 +LIBCRYPT32 +HAVE_LIBCRYPT32 +LIBMBEDCRYPTO_PREFIX +LTLIBMBEDCRYPTO +LIBMBEDCRYPTO +HAVE_LIBMBEDCRYPTO LIBGCRYPT_PREFIX LTLIBGCRYPT LIBGCRYPT HAVE_LIBGCRYPT +LIBSSL_PREFIX +LTLIBSSL +LIBSSL +HAVE_LIBSSL +CXXCPP LT_SYS_LIBRARY_PATH OTOOL64 OTOOL @@ -691,6 +725,8 @@ ac_ct_DUMPBIN DUMPBIN LD FGREP +EGREP +GREP LIBTOOL OBJDUMP DLLTOOL @@ -699,9 +735,12 @@ SSHD_FALSE SSHD_TRUE SSHD LN_S -EGREP -GREP -CPP +am__fastdepCXX_FALSE +am__fastdepCXX_TRUE +CXXDEPMODE +ac_ct_CXX +CXXFLAGS +CXX 
am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE @@ -709,7 +748,6 @@ am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE -am__quote am__include DEPDIR OBJEXT @@ -728,6 +766,9 @@ build_vendor build_cpu build LIBSSH2VER +CSCOPE +ETAGS +CTAGS am__untar am__tar AMTAR @@ -797,7 +838,8 @@ PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR -SHELL' +SHELL +am__quote' ac_subst_files='' ac_user_opts=' enable_option_checking @@ -813,17 +855,14 @@ with_gnu_ld with_sysroot enable_libtool_lock enable_largefile -with_openssl -with_libgcrypt +with_crypto enable_rpath +with_libssl_prefix with_libgcrypt_prefix -with_wincng -with_libbcrypt_prefix +with_libmbedcrypto_prefix with_libcrypt32_prefix -with_mbedtls -with_libmbedtls_prefix +with_libbcrypt_prefix with_libz -with_libssl_prefix with_libz_prefix enable_crypt_none enable_mac_none @@ -832,6 +871,8 @@ enable_clear_memory enable_debug enable_hidden_symbols enable_examples_build +enable_ossfuzzers +enable_werror ' ac_precious_vars='build_alias host_alias @@ -841,8 +882,12 @@ CFLAGS LDFLAGS LIBS CPPFLAGS -CPP -LT_SYS_LIBRARY_PATH' +CXX +CXXFLAGS +CCC +LT_SYS_LIBRARY_PATH +CXXCPP +CPP' # Initialize some variables set by options. @@ -911,8 +956,6 @@ do *) ac_optarg=yes ;; esac - # Accept the important Cygnus configure options, so we can diagnose typos. - case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; @@ -953,9 +996,9 @@ do ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" + as_fn_error $? "invalid feature name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" @@ -979,9 +1022,9 @@ do ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" + as_fn_error $? "invalid feature name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" @@ -1192,9 +1235,9 @@ do ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" + as_fn_error $? "invalid package name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" @@ -1208,9 +1251,9 @@ do ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" + as_fn_error $? "invalid package name: \`$ac_useropt'" ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" @@ -1254,9 +1297,9 @@ Try \`$0 --help' for more information" *) # FIXME: should be removed in autoconf 3.0. 
- $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; @@ -1272,7 +1315,7 @@ if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + *) printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi @@ -1336,7 +1379,7 @@ $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | +printf "%s\n" X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -1501,6 +1544,9 @@ Optional Features: --enable-examples-build Build example applications (this is the default) --disable-examples-build Do not build example applications + --enable-ossfuzzers Whether to generate the fuzzers for OSS-Fuzz + --enable-werror Enable compiler warnings as errors + --disable-werror Disable compiler warnings as errors Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] @@ -1513,22 +1559,20 @@ Optional Packages: --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot[=DIR] Search for dependent libraries within DIR (or the compiler's sysroot if not specified). 
- --with-openssl Use OpenSSL for crypto - --with-libgcrypt Use libgcrypt for crypto + --with-crypto=auto|openssl|libgcrypt|mbedtls|wincng + Select crypto backend (default: auto) --with-gnu-ld assume the C compiler uses GNU ld default=no + --with-libssl-prefix[=DIR] search for libssl in DIR/include and DIR/lib + --without-libssl-prefix don't search for libssl in includedir and libdir --with-libgcrypt-prefix[=DIR] search for libgcrypt in DIR/include and DIR/lib --without-libgcrypt-prefix don't search for libgcrypt in includedir and libdir - --with-wincng Use Windows CNG for crypto - --with-libbcrypt-prefix[=DIR] search for libbcrypt in DIR/include and DIR/lib - --without-libbcrypt-prefix don't search for libbcrypt in includedir and libdir + --with-libmbedcrypto-prefix[=DIR] search for libmbedcrypto in DIR/include and DIR/lib + --without-libmbedcrypto-prefix don't search for libmbedcrypto in includedir and libdir --with-libcrypt32-prefix[=DIR] search for libcrypt32 in DIR/include and DIR/lib --without-libcrypt32-prefix don't search for libcrypt32 in includedir and libdir - --with-mbedtls Use mbedTLS for crypto - --with-libmbedtls-prefix[=DIR] search for libmbedtls in DIR/include and DIR/lib - --without-libmbedtls-prefix don't search for libmbedtls in includedir and libdir - --with-libz Use zlib for compression - --with-libssl-prefix[=DIR] search for libssl in DIR/include and DIR/lib - --without-libssl-prefix don't search for libssl in includedir and libdir + --with-libbcrypt-prefix[=DIR] search for libbcrypt in DIR/include and DIR/lib + --without-libbcrypt-prefix don't search for libbcrypt in includedir and libdir + --with-libz Use libz for compression --with-libz-prefix[=DIR] search for libz in DIR/include and DIR/lib --without-libz-prefix don't search for libz in includedir and libdir @@ -1540,9 +1584,12 @@ Some influential environment variables: LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if you have headers in a nonstandard directory - CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags LT_SYS_LIBRARY_PATH User-defined run-time library search path. + CXXCPP C++ preprocessor + CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. @@ -1563,9 +1610,9 @@ if test "$ac_init_help" = "recursive"; then case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; @@ -1593,7 +1640,8 @@ esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. + # Check for configure.gnu first; this name is used for a wrapper for + # Metaconfig's "Configure" on case-insensitive file systems. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive @@ -1601,7 +1649,7 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix echo && $SHELL "$ac_srcdir/configure" --help=recursive else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done @@ -1611,9 +1659,9 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF libssh2 configure - -generated by GNU Autoconf 2.69 +generated by GNU Autoconf 2.71 -Copyright (C) 2012 Free Software Foundation, Inc. 
+Copyright (C) 2021 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF @@ -1630,14 +1678,14 @@ fi ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext + rm -f conftest.$ac_objext conftest.beam if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then @@ -1645,14 +1693,15 @@ $as_echo "$ac_try_echo"; } >&5 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err - } && test -s conftest.$ac_objext; then : + } && test -s conftest.$ac_objext +then : ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 @@ -1669,17 +1718,18 @@ fi ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +printf %s "checking for $2... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 int -main () +main (void) { if (sizeof ($2)) return 0; @@ -1687,12 +1737,13 @@ if (sizeof ($2)) return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int -main () +main (void) { if (sizeof (($2))) return 0; @@ -1700,101 +1751,23 @@ if (sizeof (($2))) return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : -else +else $as_nop eval "$3=yes" fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! 
-s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in @@ -1802,26 +1775,28 @@ fi ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +printf %s "checking for $2... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : eval "$3=yes" -else +else $as_nop eval "$3=no" fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile @@ -1832,14 +1807,14 @@ $as_echo "$ac_res" >&6; } ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext + rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then @@ -1847,17 +1822,18 @@ $as_echo "$ac_try_echo"; } >&5 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext - }; then : + } +then : ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 @@ -1872,17 +1848,57 @@ fi } # ac_fn_c_try_link +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest.beam + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +printf %s "checking for $2... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. @@ -1890,16 +1906,9 @@ else #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. 
- Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif + which can conflict with char $2 (); below. */ +#include #undef $2 /* Override any GCC internal prototype to avoid an error. @@ -1917,172 +1926,274 @@ choke me #endif int -main () +main (void) { return $2 (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : eval "$3=yes" -else +else $as_nop eval "$3=no" fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func -# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES -# --------------------------------------------- -# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR -# accordingly. -ac_fn_c_check_decl () +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - as_decl_name=`echo $2|sed 's/ *(.*//'` - as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 -$as_echo_n "checking whether $as_decl_name is declared... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + } +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval -} # ac_fn_c_check_decl +} # ac_fn_cxx_try_cpp -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 + rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + } +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. 
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to run conftest.$ac_ext, and return whether this succeeded. Assumes that +# executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; } +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: program exited with status $ac_status" >&5 + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... 
" >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +} # ac_fn_c_try_run + +# ac_fn_check_decl LINENO SYMBOL VAR INCLUDES EXTRA-OPTIONS FLAG-VAR +# ------------------------------------------------------------------ +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. Pass EXTRA-OPTIONS to the compiler, using FLAG-VAR. +ac_fn_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +printf %s "checking whether $as_decl_name is declared... " >&6; } +if eval test \${$3+y} +then : + printf %s "(cached) " >&6 +else $as_nop + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + eval ac_save_FLAGS=\$$6 + as_fn_append $6 " $5" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include <$2> +$4 +int +main (void) +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} _ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no +if ac_fn_c_try_compile "$LINENO" +then : + eval "$3=yes" +else $as_nop + eval "$3=no" fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" 
>&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} -( $as_echo "## ----------------------------------------- ## -## Report this to libssh2-devel@cool.haxx.se ## -## ----------------------------------------- ##" - ) | sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... 
" >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + eval $6=\$ac_save_FLAGS + fi eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +printf "%s\n" "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_check_decl + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + } +then : + ac_retval=0 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp +ac_configure_args_raw= +for ac_arg +do + case $ac_arg in + *\'*) + ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append ac_configure_args_raw " '$ac_arg'" +done + +case $ac_configure_args_raw in + *$as_nl*) + ac_safe_unquote= ;; + *) + ac_unsafe_z='|&;<>()$`\\"*?[ '' ' # This string ends in space, tab. 
+ ac_unsafe_a="$ac_unsafe_z#~" + ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g" + ac_configure_args_raw=` printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;; +esac -} # ac_fn_c_check_header_mongrel cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by libssh2 $as_me -, which was -generated by GNU Autoconf 2.69. Invocation command line was +generated by GNU Autoconf 2.71. Invocation command line was - $ $0 $@ + $ $0$ac_configure_args_raw _ACEOF exec 5>>config.log @@ -2115,8 +2226,12 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + printf "%s\n" "PATH: $as_dir" done IFS=$as_save_IFS @@ -2151,7 +2266,7 @@ do | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; @@ -2186,11 +2301,13 @@ done # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? + # Sanitize IFS. + IFS=" "" $as_nl" # Save into config.log some information that might help in debugging. { echo - $as_echo "## ---------------- ## + printf "%s\n" "## ---------------- ## ## Cache variables. ## ## ---------------- ##" echo @@ -2201,8 +2318,8 @@ trap 'exit_status=$? 
case $ac_val in #( *${as_nl}*) case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( @@ -2226,7 +2343,7 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; ) echo - $as_echo "## ----------------- ## + printf "%s\n" "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo @@ -2234,14 +2351,14 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; do eval ac_val=\$$ac_var case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac - $as_echo "$ac_var='\''$ac_val'\''" + printf "%s\n" "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## + printf "%s\n" "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo @@ -2249,15 +2366,15 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; do eval ac_val=\$$ac_var case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac - $as_echo "$ac_var='\''$ac_val'\''" + printf "%s\n" "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then - $as_echo "## ----------- ## + printf "%s\n" "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo @@ -2265,8 +2382,8 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; echo fi test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" + printf "%s\n" "$as_me: caught signal $ac_signal" + printf "%s\n" "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && @@ -2280,63 +2397,48 @@ ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h -$as_echo "/* confdefs.h */" > confdefs.h +printf "%s\n" "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF +printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF +printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF +printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF +printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF +printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF +printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac + ac_site_files="$CONFIG_SITE" elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site + ac_site_files="$prefix/share/config.site $prefix/etc/config.site" else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site + ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site" fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" + +for ac_site_file in $ac_site_files do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} + case $ac_site_file in #( + */*) : + ;; #( + *) : + ac_site_file=./$ac_site_file ;; +esac + if test -f "$ac_site_file" && test -r "$ac_site_file"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi @@ -2346,103 +2448,736 @@ if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. 
if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +printf "%s\n" "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +printf "%s\n" "$as_me: creating cache $cache_file" >&6;} >$cache_file fi -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. 
- ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## +# Test code for whether the C compiler supports C89 (global declarations) +ac_c_conftest_c89_globals=' +/* Does the compiler advertise C89 conformance? 
+ Do not test the value of __STDC__, because some compilers set it to 0 + while being otherwise adequately conformant. */ +#if !defined __STDC__ +# error "Compiler does not advertise C89 conformance" +#endif -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7 src/conf.sh. */ +struct buf { int x; }; +struct buf * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not \xHH hex character constants. + These do not provoke an error unfortunately, instead are silently treated + as an "x". The following induces an error, until -std is added to get + proper ANSI mode. Curiously \x00 != x always comes out true, for an + array size at least. It is necessary to write \x00 == 0 to get something + that is true only with -std. */ +int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1]; +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) '\''x'\'' +int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1]; +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int), + int, int);' -ac_config_headers="$ac_config_headers src/libssh2_config.h example/libssh2_config.h" +# Test code for whether the C compiler supports C89 (body of main). 
+ac_c_conftest_c89_main=' +ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]); +' +# Test code for whether the C compiler supports C99 (global declarations) +ac_c_conftest_c99_globals=' +// Does the compiler advertise C99 conformance? +#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L +# error "Compiler does not advertise C99 conformance" +#endif -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 -$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } - # Check whether --enable-maintainer-mode was given. -if test "${enable_maintainer_mode+set}" = set; then : - enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval -else - USE_MAINTAINER_MODE=no -fi +#include +extern int puts (const char *); +extern int printf (const char *, ...); +extern int dprintf (int, const char *, ...); +extern void *malloc (size_t); + +// Check varargs macros. These examples are taken from C99 6.10.3.5. +// dprintf is used instead of fprintf to avoid needing to declare +// FILE and stderr. +#define debug(...) dprintf (2, __VA_ARGS__) +#define showlist(...) puts (#__VA_ARGS__) +#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__)) +static void +test_varargs_macros (void) +{ + int x = 1234; + int y = 5678; + debug ("Flag"); + debug ("X = %d\n", x); + showlist (The first, second, and third items.); + report (x>y, "x is %d but y is %d", x, y); +} - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 -$as_echo "$USE_MAINTAINER_MODE" >&6; } +// Check long long types. 
+#define BIG64 18446744073709551615ull +#define BIG32 4294967295ul +#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) +#if !BIG_OK + #error "your preprocessor is broken" +#endif +#if BIG_OK +#else + #error "your preprocessor is broken" +#endif +static long long int bignum = -9223372036854775807LL; +static unsigned long long int ubignum = BIG64; + +struct incomplete_array +{ + int datasize; + double data[]; +}; + +struct named_init { + int number; + const wchar_t *name; + double average; +}; + +typedef const char *ccp; + +static inline int +test_restrict (ccp restrict text) +{ + // See if C++-style comments work. + // Iterate through items via the restricted pointer. + // Also check for declarations in for loops. + for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i) + continue; + return 0; +} + +// Check varargs and va_copy. +static bool +test_varargs (const char *format, ...) +{ + va_list args; + va_start (args, format); + va_list args_copy; + va_copy (args_copy, args); + + const char *str = ""; + int number = 0; + float fnumber = 0; + + while (*format) + { + switch (*format++) + { + case '\''s'\'': // string + str = va_arg (args_copy, const char *); + break; + case '\''d'\'': // int + number = va_arg (args_copy, int); + break; + case '\''f'\'': // float + fnumber = va_arg (args_copy, double); + break; + default: + break; + } + } + va_end (args_copy); + va_end (args); + + return *str && number && fnumber; +} +' + +# Test code for whether the C compiler supports C99 (body of main). +ac_c_conftest_c99_main=' + // Check bool. + _Bool success = false; + success |= (argc != 0); + + // Check restrict. + if (test_restrict ("String literal") == 0) + success = true; + char *restrict newvar = "Another string"; + + // Check varargs. + success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234); + test_varargs_macros (); + + // Check flexible array members. 
+ struct incomplete_array *ia = + malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); + ia->datasize = 10; + for (int i = 0; i < ia->datasize; ++i) + ia->data[i] = i * 1.234; + + // Check named initializers. + struct named_init ni = { + .number = 34, + .name = L"Test wide string", + .average = 543.34343, + }; + + ni.number = 58; + + int dynamic_array[ni.number]; + dynamic_array[0] = argv[0][0]; + dynamic_array[ni.number - 1] = 543; + + // work around unused variable warnings + ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\'' + || dynamic_array[ni.number - 1] != 543); +' + +# Test code for whether the C compiler supports C11 (global declarations) +ac_c_conftest_c11_globals=' +// Does the compiler advertise C11 conformance? +#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L +# error "Compiler does not advertise C11 conformance" +#endif + +// Check _Alignas. +char _Alignas (double) aligned_as_double; +char _Alignas (0) no_special_alignment; +extern char aligned_as_int; +char _Alignas (0) _Alignas (int) aligned_as_int; + +// Check _Alignof. +enum +{ + int_alignment = _Alignof (int), + int_array_alignment = _Alignof (int[100]), + char_alignment = _Alignof (char) +}; +_Static_assert (0 < -_Alignof (int), "_Alignof is signed"); + +// Check _Noreturn. +int _Noreturn does_not_return (void) { for (;;) continue; } + +// Check _Static_assert. +struct test_static_assert +{ + int x; + _Static_assert (sizeof (int) <= sizeof (long int), + "_Static_assert does not work in struct"); + long int y; +}; + +// Check UTF-8 literals. +#define u8 syntax error! +char const utf8_literal[] = u8"happens to be ASCII" "another string"; + +// Check duplicate typedefs. +typedef long *long_ptr; +typedef long int *long_ptr; +typedef long_ptr long_ptr; + +// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1. 
+struct anonymous +{ + union { + struct { int i; int j; }; + struct { int k; long int l; } w; + }; + int m; +} v1; +' + +# Test code for whether the C compiler supports C11 (body of main). +ac_c_conftest_c11_main=' + _Static_assert ((offsetof (struct anonymous, i) + == offsetof (struct anonymous, w.k)), + "Anonymous union alignment botch"); + v1.i = 2; + v1.w.k = 5; + ok |= v1.i != 5; +' + +# Test code for whether the C compiler supports C11 (complete). +ac_c_conftest_c11_program="${ac_c_conftest_c89_globals} +${ac_c_conftest_c99_globals} +${ac_c_conftest_c11_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + ${ac_c_conftest_c99_main} + ${ac_c_conftest_c11_main} + return ok; +} +" + +# Test code for whether the C compiler supports C99 (complete). +ac_c_conftest_c99_program="${ac_c_conftest_c89_globals} +${ac_c_conftest_c99_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + ${ac_c_conftest_c99_main} + return ok; +} +" + +# Test code for whether the C compiler supports C89 (complete). 
+ac_c_conftest_c89_program="${ac_c_conftest_c89_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_c_conftest_c89_main} + return ok; +} +" + +as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H" +as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H" +as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H" +as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H" +as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H" +as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H" +as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H" +as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H" +as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H" +# Test code for whether the C++ compiler supports C++98 (global declarations) +ac_cxx_conftest_cxx98_globals=' +// Does the compiler advertise C++98 conformance? +#if !defined __cplusplus || __cplusplus < 199711L +# error "Compiler does not advertise C++98 conformance" +#endif + +// These inclusions are to reject old compilers that +// lack the unsuffixed header files. +#include +#include + +// and are *not* freestanding headers in C++98. +extern void assert (int); +namespace std { + extern int strcmp (const char *, const char *); +} + +// Namespaces, exceptions, and templates were all added after "C++ 2.0". +using std::exception; +using std::strcmp; + +namespace { + +void test_exception_syntax() +{ + try { + throw "test"; + } catch (const char *s) { + // Extra parentheses suppress a warning when building autoconf itself, + // due to lint rules shared with more typical C programs. 
+ assert (!(strcmp) (s, "test")); + } +} + +template struct test_template +{ + T const val; + explicit test_template(T t) : val(t) {} + template T add(U u) { return static_cast(u) + val; } +}; + +} // anonymous namespace +' + +# Test code for whether the C++ compiler supports C++98 (body of main) +ac_cxx_conftest_cxx98_main=' + assert (argc); + assert (! argv[0]); +{ + test_exception_syntax (); + test_template tt (2.0); + assert (tt.add (4) == 6.0); + assert (true && !false); +} +' + +# Test code for whether the C++ compiler supports C++11 (global declarations) +ac_cxx_conftest_cxx11_globals=' +// Does the compiler advertise C++ 2011 conformance? +#if !defined __cplusplus || __cplusplus < 201103L +# error "Compiler does not advertise C++11 conformance" +#endif + +namespace cxx11test +{ + constexpr int get_val() { return 20; } + + struct testinit + { + int i; + double d; + }; + + class delegate + { + public: + delegate(int n) : n(n) {} + delegate(): delegate(2354) {} + + virtual int getval() { return this->n; }; + protected: + int n; + }; + + class overridden : public delegate + { + public: + overridden(int n): delegate(n) {} + virtual int getval() override final { return this->n * 2; } + }; + + class nocopy + { + public: + nocopy(int i): i(i) {} + nocopy() = default; + nocopy(const nocopy&) = delete; + nocopy & operator=(const nocopy&) = delete; + private: + int i; + }; + + // for testing lambda expressions + template Ret eval(Fn f, Ret v) + { + return f(v); + } + + // for testing variadic templates and trailing return types + template auto sum(V first) -> V + { + return first; + } + template auto sum(V first, Args... 
rest) -> V + { + return first + sum(rest...); + } +} +' + +# Test code for whether the C++ compiler supports C++11 (body of main) +ac_cxx_conftest_cxx11_main=' +{ + // Test auto and decltype + auto a1 = 6538; + auto a2 = 48573953.4; + auto a3 = "String literal"; + + int total = 0; + for (auto i = a3; *i; ++i) { total += *i; } + + decltype(a2) a4 = 34895.034; +} +{ + // Test constexpr + short sa[cxx11test::get_val()] = { 0 }; +} +{ + // Test initializer lists + cxx11test::testinit il = { 4323, 435234.23544 }; +} +{ + // Test range-based for + int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3, + 14, 19, 17, 8, 6, 20, 16, 2, 11, 1}; + for (auto &x : array) { x += 23; } +} +{ + // Test lambda expressions + using cxx11test::eval; + assert (eval ([](int x) { return x*2; }, 21) == 42); + double d = 2.0; + assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0); + assert (d == 5.0); + assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0); + assert (d == 5.0); +} +{ + // Test use of variadic templates + using cxx11test::sum; + auto a = sum(1); + auto b = sum(1, 2); + auto c = sum(1.0, 2.0, 3.0); +} +{ + // Test constructor delegation + cxx11test::delegate d1; + cxx11test::delegate d2(); + cxx11test::delegate d3(45); +} +{ + // Test override and final + cxx11test::overridden o1(55464); +} +{ + // Test nullptr + char *c = nullptr; +} +{ + // Test template brackets + test_template<::test_template> v(test_template(12)); +} +{ + // Unicode literals + char const *utf8 = u8"UTF-8 string \u2500"; + char16_t const *utf16 = u"UTF-8 string \u2500"; + char32_t const *utf32 = U"UTF-32 string \u2500"; +} +' + +# Test code for whether the C compiler supports C++11 (complete). 
+ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals} +${ac_cxx_conftest_cxx11_globals} + +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_cxx_conftest_cxx98_main} + ${ac_cxx_conftest_cxx11_main} + return ok; +} +" + +# Test code for whether the C compiler supports C++98 (complete). +ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals} +int +main (int argc, char **argv) +{ + int ok = 0; + ${ac_cxx_conftest_cxx98_main} + return ok; +} +" + + +# Auxiliary files required by this configure script. +ac_aux_files="config.rpath ltmain.sh compile config.guess config.sub missing install-sh" + +# Locations in which to look for auxiliary files. +ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.." + +# Search for a directory containing all of the required auxiliary files, +# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates. +# If we don't find one directory that contains all the files we need, +# we report the set of missing files from the *first* directory in +# $ac_aux_dir_candidates and give up. +ac_missing_aux_files="" +ac_first_candidate=: +printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5 +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in $ac_aux_dir_candidates +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + as_found=: + + printf "%s\n" "$as_me:${as_lineno-$LINENO}: trying $as_dir" >&5 + ac_aux_dir_found=yes + ac_install_sh= + for ac_aux in $ac_aux_files + do + # As a special case, if "install-sh" is required, that requirement + # can be satisfied by any of "install-sh", "install.sh", or "shtool", + # and $ac_install_sh is set appropriately for whichever one is found. 
+ if test x"$ac_aux" = x"install-sh" + then + if test -f "${as_dir}install-sh"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install-sh found" >&5 + ac_install_sh="${as_dir}install-sh -c" + elif test -f "${as_dir}install.sh"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}install.sh found" >&5 + ac_install_sh="${as_dir}install.sh -c" + elif test -f "${as_dir}shtool"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}shtool found" >&5 + ac_install_sh="${as_dir}shtool install -c" + else + ac_aux_dir_found=no + if $ac_first_candidate; then + ac_missing_aux_files="${ac_missing_aux_files} install-sh" + else + break + fi + fi + else + if test -f "${as_dir}${ac_aux}"; then + printf "%s\n" "$as_me:${as_lineno-$LINENO}: ${as_dir}${ac_aux} found" >&5 + else + ac_aux_dir_found=no + if $ac_first_candidate; then + ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}" + else + break + fi + fi + fi + done + if test "$ac_aux_dir_found" = yes; then + ac_aux_dir="$as_dir" + break + fi + ac_first_candidate=false + + as_found=false +done +IFS=$as_save_IFS +if $as_found +then : + +else $as_nop + as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5 +fi + + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +if test -f "${ac_aux_dir}config.guess"; then + ac_config_guess="$SHELL ${ac_aux_dir}config.guess" +fi +if test -f "${ac_aux_dir}config.sub"; then + ac_config_sub="$SHELL ${ac_aux_dir}config.sub" +fi +if test -f "$ac_aux_dir/configure"; then + ac_configure="$SHELL ${ac_aux_dir}configure" +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +printf "%s\n" "$as_me: former value: \`$ac_old_val'" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +printf "%s\n" "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file' + and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + +ac_config_headers="$ac_config_headers src/libssh2_config.h" + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 +printf %s "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } + # Check whether --enable-maintainer-mode was given. +if test ${enable_maintainer_mode+y} +then : + enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval +else $as_nop + USE_MAINTAINER_MODE=no +fi + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 +printf "%s\n" "$USE_MAINTAINER_MODE" >&6; } if test $USE_MAINTAINER_MODE = yes; then MAINTAINER_MODE_TRUE= MAINTAINER_MODE_FALSE='#' @@ -2455,7 +3190,8 @@ fi # Check whether --enable-silent-rules was given. 
-if test "${enable_silent_rules+set}" = set; then : +if test ${enable_silent_rules+y} +then : enableval=$enable_silent_rules; fi @@ -2465,12 +3201,13 @@ case $enable_silent_rules in # ((( *) AM_DEFAULT_VERBOSITY=0;; esac am_make=${MAKE-make} -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 -$as_echo_n "checking whether $am_make supports nested variables... " >&6; } -if ${am_cv_make_support_nested_variables+:} false; then : - $as_echo_n "(cached) " >&6 -else - if $as_echo 'TRUE=$(BAR$(V)) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +printf %s "checking whether $am_make supports nested variables... " >&6; } +if test ${am_cv_make_support_nested_variables+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if printf "%s\n" 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 @@ -2482,8 +3219,8 @@ else am_cv_make_support_nested_variables=no fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 -$as_echo "$am_cv_make_support_nested_variables" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +printf "%s\n" "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' @@ -2496,11 +3233,12 @@ AM_BACKSLASH='\' # Extract the first word of "sed", so it can be a program name with args. set dummy sed; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_SED+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_path_SED+y} +then : + printf %s "(cached) " >&6 +else $as_nop case $SED in [\\/]* | ?:[\\/]*) ac_cv_path_SED="$SED" # Let the user override the test with a path. 
@@ -2511,11 +3249,15 @@ as_dummy="$PATH:/usr/bin:/usr/local/bin" for as_dir in $as_dummy do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_SED="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_path_SED="$as_dir$ac_word$ac_exec_ext" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2528,54 +3270,27 @@ esac fi SED=$ac_cv_path_SED if test -n "$SED"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SED" >&5 -$as_echo "$SED" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $SED" >&5 +printf "%s\n" "$SED" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi if test "x$SED" = "xsed-was-not-found-by-configure"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: sed was not found, this may ruin your chances to build fine" >&5 -$as_echo "$as_me: WARNING: sed was not found, this may ruin your chances to build fine" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: sed was not found, this may ruin your chances to build fine" >&5 +printf "%s\n" "$as_me: WARNING: sed was not found, this may ruin your chances to build fine" >&2;} fi LIBSSH2VER=`$SED -ne 's/^#define LIBSSH2_VERSION *"\(.*\)"/\1/p' ${srcdir}/include/libssh2.h` -am__api_version='1.15' +am__api_version='1.16' -ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." 
"$srcdir/../.."; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. -# Find a good install program. We prefer a C program (faster), + # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install @@ -2589,20 +3304,25 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 -$as_echo_n "checking for a BSD-compatible install... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +printf %s "checking for a BSD-compatible install... 
" >&6; } if test -z "$INSTALL"; then -if ${ac_cv_path_install+:} false; then : - $as_echo_n "(cached) " >&6 -else +if test ${ac_cv_path_install+y} +then : + printf %s "(cached) " >&6 +else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + # Account for fact that we put trailing slashes in our PATH walk. +case $as_dir in #(( + ./ | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; @@ -2612,13 +3332,13 @@ case $as_dir/ in #(( # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. 
: else @@ -2626,12 +3346,12 @@ case $as_dir/ in #(( echo one > conftest.one echo two > conftest.two mkdir conftest.dir - if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c" break 3 fi fi @@ -2647,7 +3367,7 @@ IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi - if test "${ac_cv_path_install+set}" = set; then + if test ${ac_cv_path_install+y}; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a @@ -2657,8 +3377,8 @@ fi INSTALL=$ac_install_sh fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 -$as_echo "$INSTALL" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +printf "%s\n" "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. @@ -2668,8 +3388,8 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 -$as_echo_n "checking whether build environment is sane... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +printf %s "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' @@ -2723,8 +3443,8 @@ else as_fn_error $? "newly created file is older than distributed files! 
Check your system clock" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= @@ -2743,26 +3463,23 @@ test "$program_suffix" != NONE && # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' -program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` +program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"` + # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` -if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac + + if test x"${MISSING+set}" != xset; then + MISSING="\${SHELL} '$am_aux_dir/missing'" fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 -$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh+set}" != xset; then @@ -2782,11 +3499,12 @@ if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else @@ -2794,11 +3512,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2809,11 +3531,11 @@ fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +printf "%s\n" "$STRIP" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -2822,11 +3544,12 @@ if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_ac_ct_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else @@ -2834,11 +3557,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2849,11 +3576,11 @@ fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +printf "%s\n" "$ac_ct_STRIP" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then @@ -2861,8 +3588,8 @@ fi else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP @@ -2874,25 +3601,31 @@ fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 
-$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5 +printf %s "checking for a race-free mkdir -p... " >&6; } if test -z "$MKDIR_P"; then - if ${ac_cv_path_mkdir+:} false; then : - $as_echo_n "(cached) " >&6 -else + if test ${ac_cv_path_mkdir+y} +then : + printf %s "(cached) " >&6 +else $as_nop as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do - as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue - case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( - 'mkdir (GNU coreutils) '* | \ - 'mkdir (coreutils) '* | \ + as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue + case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir ('*'coreutils) '* | \ + 'BusyBox '* | \ 'mkdir (fileutils) '4.1*) - ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext break 3;; esac done @@ -2903,7 +3636,7 @@ IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version - if test "${ac_cv_path_mkdir+set}" = set; then + if test ${ac_cv_path_mkdir+y}; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a @@ -2913,18 +3646,19 @@ fi MKDIR_P="$ac_install_sh -d" fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 -$as_echo "$MKDIR_P" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +printf "%s\n" "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_AWK+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_AWK+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else @@ -2932,11 +3666,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -2947,24 +3685,25 @@ fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 -$as_echo "$AWK" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +printf "%s\n" "$AWK" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi test -n "$AWK" && break done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 -$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +printf %s "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } set x ${MAKE-make} -ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : - $as_echo_n "(cached) " >&6 -else +ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval test \${ac_cv_prog_make_${ac_make}_set+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @@ -2980,12 +3719,12 @@ esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } SET_MAKE= else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi @@ -3023,14 +3762,10 @@ fi VERSION='-' -cat >>confdefs.h <<_ACEOF -#define PACKAGE "$PACKAGE" -_ACEOF +printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h -cat >>confdefs.h <<_ACEOF -#define VERSION "$VERSION" -_ACEOF +printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h # Some tools Automake needs. @@ -3050,8 +3785,8 @@ MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: -# -# +# +# mkdir_p='$(MKDIR_P)' # We need awk for the "check" target (and possibly the TAP driver). 
The @@ -3070,6 +3805,20 @@ am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi + +if test -z "$ETAGS"; then + ETAGS=etags +fi + +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi + + # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile @@ -3102,7 +3851,7 @@ END Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: . +that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM @@ -3113,35 +3862,38 @@ END fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking libssh2 version" >&5 -$as_echo_n "checking libssh2 version... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBSSH2VER" >&5 -$as_echo "$LIBSSH2VER" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking libssh2 version" >&5 +printf %s "checking libssh2 version... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBSSH2VER" >&5 +printf "%s\n" "$LIBSSH2VER" >&6; } AB_VERSION=$LIBSSH2VER -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if ${ac_cv_build+:} false; then : - $as_echo_n "(cached) " >&6 -else + + # Make sure we can run config.sub. +$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5 + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +printf %s "checking build system type... 
" >&6; } +if test ${ac_cv_build+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_build_alias=$build_alias test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` + ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 +ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +printf "%s\n" "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; @@ -3160,21 +3912,22 @@ IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... " >&6; } -if ${ac_cv_host+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +printf %s "checking host system type... " >&6; } +if test ${ac_cv_host+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 + ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` || + as_fn_error $? 
"$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5 fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +printf "%s\n" "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; @@ -3200,19 +3953,19 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac if test -z "$AB_PACKAGE"; then AB_PACKAGE=${PACKAGE_NAME:-$PACKAGE} fi - { $as_echo "$as_me:${as_lineno-$LINENO}: autobuild project... $AB_PACKAGE" >&5 -$as_echo "$as_me: autobuild project... $AB_PACKAGE" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: autobuild project... $AB_PACKAGE" >&5 +printf "%s\n" "$as_me: autobuild project... $AB_PACKAGE" >&6;} if test -z "$AB_VERSION"; then AB_VERSION=${PACKAGE_VERSION:-$VERSION} fi - { $as_echo "$as_me:${as_lineno-$LINENO}: autobuild revision... $AB_VERSION" >&5 -$as_echo "$as_me: autobuild revision... $AB_VERSION" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: autobuild revision... $AB_VERSION" >&5 +printf "%s\n" "$as_me: autobuild revision... $AB_VERSION" >&6;} hostname=`hostname` if test "$hostname"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: autobuild hostname... $hostname" >&5 -$as_echo "$as_me: autobuild hostname... $hostname" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: autobuild hostname... $hostname" >&5 +printf "%s\n" "$as_me: autobuild hostname... $hostname" >&6;} fi @@ -3222,8 +3975,8 @@ $as_echo "$as_me: autobuild hostname... $hostname" >&6;} date=`date` fi if test "$date"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: autobuild timestamp... $date" >&5 -$as_echo "$as_me: autobuild timestamp... $date" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: autobuild timestamp... $date" >&5 +printf "%s\n" "$as_me: autobuild timestamp... 
$date" >&6;} fi @@ -3236,12 +3989,9 @@ case "$host" in CFLAGS="$CFLAGS -DLIBSSH2_WIN32" LIBS="$LIBS -lws2_32" ;; - *-cygwin) - CFLAGS="$CFLAGS -DLIBSSH2_WIN32" + *darwin*) + CFLAGS="$CFLAGS -DLIBSSH2_DARWIN" ;; - *darwin*) - CFLAGS="$CFLAGS -DLIBSSH2_DARWIN" - ;; *hpux*) ;; *osf*) @@ -3251,52 +4001,62 @@ case "$host" in ;; esac + + + + + + + + + DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" - -am_make=${MAKE-make} -cat > confinc << 'END' +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 +printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; } +cat > confinc.mk << 'END' am__doit: - @echo this is the am__doit target + @echo this is the am__doit target >confinc.out .PHONY: am__doit END -# If we don't find an include directive, just comment out the code. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 -$as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= -_am_result=none -# First try GNU make style include. -echo "include confinc" > confmf -# Ignore all kinds of additional output from 'make'. -case `$am_make -s -f confmf 2> /dev/null` in #( -*the\ am__doit\ target*) - am__include=include - am__quote= - _am_result=GNU - ;; -esac -# Now try BSD make style include. -if test "$am__include" = "#"; then - echo '.include "confinc"' > confmf - case `$am_make -s -f confmf 2> /dev/null` in #( - *the\ am__doit\ target*) - am__include=.include - am__quote="\"" - _am_result=BSD +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. +echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 + (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + case $?:`cat confinc.out 2>/dev/null` in #( + '0:this is the am__doit target') : + case $s in #( + BSD) : + am__include='.include' am__quote='"' ;; #( + *) : + am__include='include' am__quote='' ;; +esac ;; #( + *) : ;; - esac -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 -$as_echo "$_am_result" >&6; } -rm -f confinc confmf +esac + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 +printf "%s\n" "${_am_result}" >&6; } # Check whether --enable-dependency-tracking was given. -if test "${enable_dependency_tracking+set}" = set; then : +if test ${enable_dependency_tracking+y} +then : enableval=$enable_dependency_tracking; fi @@ -3322,11 +4082,12 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -3334,11 +4095,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3349,11 +4114,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -3362,11 +4127,12 @@ if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else @@ -3374,11 +4140,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3389,11 +4159,11 @@ fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi if test "x$ac_ct_CC" = x; then @@ -3401,8 +4171,8 @@ fi else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC @@ -3415,11 +4185,12 @@ if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -3427,11 +4198,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3442,11 +4217,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -3455,11 +4230,12 @@ fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -3468,15 +4244,19 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3492,18 +4272,18 @@ if test $ac_prog_rejected = yes; then # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -3514,11 +4294,12 @@ if test -z "$CC"; then do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
else @@ -3526,11 +4307,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3541,11 +4326,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -3558,11 +4343,12 @@ if test -z "$CC"; then do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else @@ -3570,11 +4356,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -3585,11 +4375,11 @@ fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -3601,34 +4391,138 @@ done else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. +set dummy ${ac_tool_prefix}clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "clang", so it can be a program name with args. +set dummy clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi +else + CC="$ac_cv_prog_CC" fi fi -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. 
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 -for ac_option in --version -v -V -qversion; do +for ac_option in --version -v -V -qversion -version; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then @@ -3638,7 +4532,7 @@ $as_echo "$ac_try_echo"; } >&5 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done @@ -3646,7 +4540,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { ; @@ -3658,9 +4552,9 @@ ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +printf %s "checking whether the C compiler works... 
" >&6; } +ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" @@ -3681,11 +4575,12 @@ case "(($ac_try" in *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, @@ -3702,7 +4597,7 @@ do # certainly right. break;; *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi @@ -3718,44 +4613,46 @@ do done test "$ac_cv_exeext" = no && ac_cv_exeext= -else +else $as_nop ac_file='' fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 +if test -z "$ac_file" +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; 
} -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +printf %s "checking for C compiler default output file name... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +printf "%s\n" "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +printf %s "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with @@ -3769,15 +4666,15 @@ for ac_file in conftest.exe conftest conftest.*; do * ) break;; esac done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +else $as_nop + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +printf "%s\n" "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext @@ -3786,7 +4683,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int -main () +main (void) { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; @@ -3798,8 +4695,8 @@ _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +printf %s "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in @@ -3807,10 +4704,10 @@ case "(($ac_try" in *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in @@ -3818,39 +4715,40 @@ $as_echo "$ac_try_echo"; } >&5 *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +printf "%s\n" "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +printf %s "checking for suffix of object files... " >&6; } +if test ${ac_cv_objext+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int -main () +main (void) { ; @@ -3864,11 +4762,12 @@ case "(($ac_try" in *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in @@ -3877,31 +4776,32 @@ $as_echo "$ac_try_echo"; } >&5 break;; esac done -else - $as_echo "$as_me: failed program was:" >&5 +else $as_nop + printf "%s\n" "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +printf "%s\n" "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 +printf %s "checking whether the compiler supports GNU C... 
" >&6; } +if test ${ac_cv_c_compiler_gnu+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { #ifndef __GNUC__ choke me @@ -3911,29 +4811,33 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_compiler_gnu=yes -else +else $as_nop ac_compiler_gnu=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } +ac_compiler_gnu=$ac_cv_c_compiler_gnu + if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi -ac_test_CFLAGS=${CFLAGS+set} +ac_test_CFLAGS=${CFLAGS+y} ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +printf %s "checking whether $CC accepts -g... " >&6; } +if test ${ac_cv_prog_cc_g+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no @@ -3942,57 +4846,60 @@ else /* end confdefs.h. */ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_g=yes -else +else $as_nop CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : -else +else $as_nop ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_g=yes fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +printf "%s\n" "$ac_cv_prog_cc_g" >&6; } +if test $ac_test_CFLAGS; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then @@ -4007,94 +4914,144 @@ else CFLAGS= fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no +ac_prog_cc_stdc=no +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 +printf %s "checking for $CC option to enable C11 features... " >&6; } +if test ${ac_cv_prog_cc_c11+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c11=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; +$ac_c_conftest_c11_program +_ACEOF +for ac_arg in '' -std=gnu11 +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c11=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c11" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; +if test "x$ac_cv_prog_cc_c11" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c11" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 +printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } + CC="$CC $ac_cv_prog_cc_c11" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 + ac_prog_cc_stdc=c11 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 +printf %s "checking for $CC option to enable C99 features... " >&6; } +if test ${ac_cv_prog_cc_c99+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c99=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c99_program +_ACEOF +for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c99=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c99" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} +if test "x$ac_cv_prog_cc_c99" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c99" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else 
$as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } + CC="$CC $ac_cv_prog_cc_c99" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 + ac_prog_cc_stdc=c99 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 +printf %s "checking for $CC option to enable C89 features... " >&6; } +if test ${ac_cv_prog_cc_c89+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c89_program _ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : + if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_c89=$ac_arg fi -rm -f core conftest.err conftest.$ac_objext +rm -f core conftest.err conftest.$ac_objext conftest.beam test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC - fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : +if test "x$ac_cv_prog_cc_c89" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c89" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none 
needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } + CC="$CC $ac_cv_prog_cc_c89" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 + ac_prog_cc_stdc=c89 +fi fi ac_ext=c @@ -4103,21 +5060,23 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -ac_ext=c + + ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 -$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } -if ${am_cv_prog_cc_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +printf %s "checking whether $CC understands -c and -o together... " >&6; } +if test ${am_cv_prog_cc_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { ; @@ -4145,8 +5104,8 @@ _ACEOF rm -f core conftest* unset am_i fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 -$as_echo "$am_cv_prog_cc_c_o" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. @@ -4164,11 +5123,12 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 -$as_echo_n "checking dependency style of $depcc... 
" >&6; } -if ${am_cv_CC_dependencies_compiler_type+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +printf %s "checking dependency style of $depcc... " >&6; } +if test ${am_cv_CC_dependencies_compiler_type+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For @@ -4275,8 +5235,8 @@ else fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 -$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if @@ -4291,407 +5251,40 @@ fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes +ac_header= ac_cache= +for ac_item in $ac_header_c_list do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. 
- # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : + if test $ac_cache; then + ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default" + if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then + printf "%s\n" "#define $ac_item 1" >> confdefs.h + fi + ac_header= ac_cache= + elif test $ac_header; then + ac_cache=$ac_item + else + ac_header=$ac_item + fi +done -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_GREP" || continue -# Check for GNU ac_path_GREP and select it if it is found. 
- # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) -#endif -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then -$as_echo "#define STDC_HEADERS 1" >>confdefs.h -fi +if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes +then : -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF +printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h fi - -done - - ac_fn_c_check_type "$LINENO" "long long" "ac_cv_type_long_long" "$ac_includes_default" -if test "x$ac_cv_type_long_long" = xyes; then : +if test "x$ac_cv_type_long_long" = xyes +then : -$as_echo "#define HAVE_LONGLONG 1" >>confdefs.h +printf "%s\n" "#define HAVE_LONGLONG 1" >>confdefs.h longlong="yes" @@ -4700,15 +5293,15 @@ fi # - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is already defined" >&5 -$as_echo_n "checking if _REENTRANT is already defined... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is already defined" >&5 +printf %s "checking if _REENTRANT is already defined... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int -main () +main (void) { #ifdef _REENTRANT @@ -4722,24 +5315,25 @@ main () } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } tmp_reentrant_initially_defined="yes" -else +else $as_nop - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } tmp_reentrant_initially_defined="no" fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext # if test "$tmp_reentrant_initially_defined" = "no"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is actually needed" >&5 -$as_echo_n "checking if _REENTRANT is actually needed... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is actually needed" >&5 +printf %s "checking if _REENTRANT is actually needed... " >&6; } case $host in *-*-solaris* | *-*-hpux*) @@ -4752,21 +5346,21 @@ $as_echo_n "checking if _REENTRANT is actually needed... " >&6; } if test "$tmp_need_reentrant" = "yes"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi fi # - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is onwards defined" >&5 -$as_echo_n "checking if _REENTRANT is onwards defined... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if _REENTRANT is onwards defined" >&5 +printf %s "checking if _REENTRANT is onwards defined... 
" >&6; } if test "$tmp_reentrant_initially_defined" = "yes" || test "$tmp_need_reentrant" = "yes"; then -$as_echo "#define NEED_REENTRANT 1" >>confdefs.h +printf "%s\n" "#define NEED_REENTRANT 1" >>confdefs.h cat >>confdefs.h <<_EOF #ifndef _REENTRANT @@ -4774,21 +5368,22 @@ cat >>confdefs.h <<_EOF #endif _EOF - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi # # Some systems (Solaris?) have socket() in -lsocket. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 -$as_echo_n "checking for library containing socket... " >&6; } -if ${ac_cv_search_socket+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 +printf %s "checking for library containing socket... " >&6; } +if test ${ac_cv_search_socket+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -4796,57 +5391,60 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif char socket (); int -main () +main (void) { return socket (); ; return 0; } _ACEOF -for ac_lib in '' socket; do +for ac_lib in '' socket +do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi - if ac_fn_c_try_link "$LINENO"; then : + if ac_fn_c_try_link "$LINENO" +then : ac_cv_search_socket=$ac_res fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext - if ${ac_cv_search_socket+:} false; then : + if test ${ac_cv_search_socket+y} +then : break fi done -if ${ac_cv_search_socket+:} false; then : +if test ${ac_cv_search_socket+y} +then : -else +else $as_nop ac_cv_search_socket=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 -$as_echo "$ac_cv_search_socket" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 +printf "%s\n" "$ac_cv_search_socket" >&6; } ac_res=$ac_cv_search_socket -if test "$ac_res" != no; then : +if test "$ac_res" != no +then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi # Solaris has inet_addr() in -lnsl. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing inet_addr" >&5 -$as_echo_n "checking for library containing inet_addr... " >&6; } -if ${ac_cv_search_inet_addr+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for library containing inet_addr" >&5 +printf %s "checking for library containing inet_addr... " >&6; } +if test ${ac_cv_search_inet_addr+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -4854,46 +5452,48 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif char inet_addr (); int -main () +main (void) { return inet_addr (); ; return 0; } _ACEOF -for ac_lib in '' nsl; do +for ac_lib in '' nsl +do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi - if ac_fn_c_try_link "$LINENO"; then : + if ac_fn_c_try_link "$LINENO" +then : ac_cv_search_inet_addr=$ac_res fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext - if ${ac_cv_search_inet_addr+:} false; then : + if test ${ac_cv_search_inet_addr+y} +then : break fi done -if ${ac_cv_search_inet_addr+:} false; then : +if test ${ac_cv_search_inet_addr+y} +then : -else +else $as_nop ac_cv_search_inet_addr=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_inet_addr" >&5 -$as_echo "$ac_cv_search_inet_addr" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_inet_addr" >&5 +printf "%s\n" "$ac_cv_search_inet_addr" >&6; } ac_res=$ac_cv_search_inet_addr -if test "$ac_res" != no; then : +if test "$ac_res" != no +then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi @@ -4909,11 +5509,12 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -4921,11 +5522,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -4936,11 +5541,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -4949,11 +5554,12 @@ if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else @@ -4961,11 +5567,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -4976,11 +5586,11 @@ fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi if test "x$ac_ct_CC" = x; then @@ -4988,8 +5598,8 @@ fi else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC @@ -5002,11 +5612,12 @@ if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -5014,11 +5625,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5029,11 +5644,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -5042,11 +5657,12 @@ fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -5055,15 +5671,19 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5079,18 +5699,18 @@ if test $ac_prog_rejected = yes; then # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -5101,11 +5721,12 @@ if test -z "$CC"; then do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else @@ -5113,11 +5734,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5128,11 +5753,11 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -5145,11 +5770,12 @@ if test -z "$CC"; then do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else @@ -5157,11 +5783,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5172,11 +5802,11 @@ fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi @@ -5188,34 +5818,138 @@ done else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +printf "%s\n" "$CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "clang", so it can be a program name with args. +set dummy clang; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="clang" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +printf "%s\n" "$ac_ct_CC" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi +else + CC="$ac_cv_prog_CC" fi fi -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. 
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 -for ac_option in --version -v -V -qversion; do +for ac_option in --version -v -V -qversion -version; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +printf "%s\n" "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then @@ -5225,20 +5959,21 @@ $as_echo "$ac_try_echo"; } >&5 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5 +printf %s "checking whether the compiler supports GNU C... " >&6; } +if test ${ac_cv_c_compiler_gnu+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int -main () +main (void) { #ifndef __GNUC__ choke me @@ -5248,29 +5983,33 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_compiler_gnu=yes -else +else $as_nop ac_compiler_gnu=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; } +ac_compiler_gnu=$ac_cv_c_compiler_gnu + if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi -ac_test_CFLAGS=${CFLAGS+set} +ac_test_CFLAGS=${CFLAGS+y} ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +printf %s "checking whether $CC accepts -g... " >&6; } +if test ${ac_cv_prog_cc_g+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no @@ -5279,57 +6018,60 @@ else /* end confdefs.h. */ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_g=yes -else +else $as_nop CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : -else +else $as_nop ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_g=yes fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +printf "%s\n" "$ac_cv_prog_cc_g" >&6; } +if test $ac_test_CFLAGS; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then @@ -5344,94 +6086,144 @@ else CFLAGS= fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no +ac_prog_cc_stdc=no +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5 +printf %s "checking for $CC option to enable C11 features... " >&6; } +if test ${ac_cv_prog_cc_c11+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c11=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; +$ac_c_conftest_c11_program +_ACEOF +for ac_arg in '' -std=gnu11 +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c11=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c11" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; +if test "x$ac_cv_prog_cc_c11" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c11" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5 +printf "%s\n" "$ac_cv_prog_cc_c11" >&6; } + CC="$CC $ac_cv_prog_cc_c11" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11 + ac_prog_cc_stdc=c11 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5 +printf %s "checking for $CC option to enable C99 features... " >&6; } +if test ${ac_cv_prog_cc_c99+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c99=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c99_program +_ACEOF +for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99= +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO" +then : + ac_cv_prog_cc_c99=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cc_c99" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC +fi -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} +if test "x$ac_cv_prog_cc_c99" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c99" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else 
$as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +printf "%s\n" "$ac_cv_prog_cc_c99" >&6; } + CC="$CC $ac_cv_prog_cc_c99" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 + ac_prog_cc_stdc=c99 +fi +fi +if test x$ac_prog_cc_stdc = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5 +printf %s "checking for $CC option to enable C89 features... " >&6; } +if test ${ac_cv_prog_cc_c89+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_c_conftest_c89_program _ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : + if ac_fn_c_try_compile "$LINENO" +then : ac_cv_prog_cc_c89=$ac_arg fi -rm -f core conftest.err conftest.$ac_objext +rm -f core conftest.err conftest.$ac_objext conftest.beam test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC - fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : +if test "x$ac_cv_prog_cc_c89" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cc_c89" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none 
needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +printf "%s\n" "$ac_cv_prog_cc_c89" >&6; } + CC="$CC $ac_cv_prog_cc_c89" +fi + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 + ac_prog_cc_stdc=c89 +fi fi ac_ext=c @@ -5440,21 +6232,23 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -ac_ext=c + + ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 -$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } -if ${am_cv_prog_cc_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +printf %s "checking whether $CC understands -c and -o together... " >&6; } +if test ${am_cv_prog_cc_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { ; @@ -5482,8 +6276,8 @@ _ACEOF rm -f core conftest* unset am_i fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 -$as_echo "$am_cv_prog_cc_c_o" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +printf "%s\n" "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. @@ -5501,11 +6295,12 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 -$as_echo_n "checking dependency style of $depcc... 
" >&6; } -if ${am_cv_CC_dependencies_compiler_type+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +printf %s "checking dependency style of $depcc... " >&6; } +if test ${am_cv_CC_dependencies_compiler_type+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For @@ -5612,8 +6407,8 @@ else fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 -$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if @@ -5628,125 +6423,47 @@ fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 -$as_echo_n "checking whether ln -s works... " >&6; } -LN_S=$as_ln_s -if test "$LN_S" = "ln -s"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 -$as_echo "no, using $LN_S" >&6; } -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 -$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } -set x ${MAKE-make} -ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat >conftest.make <<\_ACEOF -SHELL = /bin/sh -all: - @echo '@@@%%%=$(MAKE)=@@@%%%' -_ACEOF -# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
-case `${MAKE-make} -f conftest.make 2>/dev/null` in - *@@@%%%=?*=@@@%%%*) - eval ac_cv_prog_make_${ac_make}_set=yes;; - *) - eval ac_cv_prog_make_${ac_make}_set=no;; -esac -rm -f conftest.make -fi -if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - SET_MAKE= -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - SET_MAKE="MAKE=${MAKE-make}" -fi - -for ac_prog in sshd -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_SSHD+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $SSHD in - [\\/]* | ?:[\\/]*) - ac_cv_path_SSHD="$SSHD" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/libexec$PATH_SEPARATOR /usr/sbin$PATH_SEPARATOR/usr/etc$PATH_SEPARATOR/etc -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_SSHD="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - ;; -esac -fi -SSHD=$ac_cv_path_SSHD -if test -n "$SSHD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SSHD" >&5 -$as_echo "$SSHD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - test -n "$SSHD" && break -done - if test -n "$SSHD"; then - SSHD_TRUE= - SSHD_FALSE='#' -else - SSHD_TRUE='#' - SSHD_FALSE= -fi -enable_win32_dll=yes -case $host in -*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}as", so it can be a program name with args. -set dummy ${ac_tool_prefix}as; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_AS+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AS"; then - ac_cv_prog_AS="$AS" # Let the user override the test. +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_AS="${ac_tool_prefix}as" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5755,38 +6472,47 @@ IFS=$as_save_IFS fi fi -AS=$ac_cv_prog_AS -if test -n "$AS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AS" >&5 -$as_echo "$AS" >&6; } +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +printf "%s\n" "$CXX" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi + test -n "$CXX" && break + done fi -if test -z "$ac_cv_prog_AS"; then - ac_ct_AS=$AS - # Extract the first word of "as", so it can be a program name with args. -set dummy as; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_AS+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_AS"; then - ac_cv_prog_ac_ct_AS="$ac_ct_AS" # Let the user override the test. +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++ +do + # Extract the first word of "$ac_prog", so it can be a program name with args. 
+set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_AS="as" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -5795,491 +6521,839 @@ IFS=$as_save_IFS fi fi -ac_ct_AS=$ac_cv_prog_ac_ct_AS -if test -n "$ac_ct_AS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AS" >&5 -$as_echo "$ac_ct_AS" >&6; } +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +printf "%s\n" "$ac_ct_CXX" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - if test "x$ac_ct_AS" = x; then - AS="false" + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 
+printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac - AS=$ac_ct_AS + CXX=$ac_ct_CXX fi -else - AS="$ac_cv_prog_AS" fi - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DLLTOOL"; then - ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 fi +fi +# Provide some information about the compiler. +printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +printf "%s\n" "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } done - done -IFS=$as_save_IFS +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5 +printf %s "checking whether the compiler supports GNU C++... " >&6; } +if test ${ac_cv_cxx_compiler_gnu+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO" +then : + ac_compiler_gnu=yes +else $as_nop + ac_compiler_gnu=no fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + fi -DLLTOOL=$ac_cv_prog_DLLTOOL -if test -n "$DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -$as_echo "$DLLTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; } +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+y} +ac_save_CXXFLAGS=$CXXFLAGS +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +printf %s "checking whether $CXX accepts -g... " >&6; } +if test ${ac_cv_prog_cxx_g+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int +main (void) +{ -fi -if test -z "$ac_cv_prog_DLLTOOL"; then - ac_ct_DLLTOOL=$DLLTOOL - # Extract the first word of "dlltool", so it can be a program name with args. -set dummy dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DLLTOOL"; then - ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DLLTOOL="dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO" +then : + ac_cv_prog_cxx_g=yes +else $as_nop + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO" +then : + +else $as_nop + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO" +then : + ac_cv_prog_cxx_g=yes fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -if test -n "$ac_ct_DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -$as_echo "$ac_ct_DLLTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi - - if test "x$ac_ct_DLLTOOL" = x; then - DLLTOOL="false" +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +printf "%s\n" "$ac_cv_prog_cxx_g" >&6; } +if test $ac_test_CXXFLAGS; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DLLTOOL=$ac_ct_DLLTOOL + CXXFLAGS="-g" fi else - DLLTOOL="$ac_cv_prog_DLLTOOL" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. -set dummy ${ac_tool_prefix}objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OBJDUMP"; then - ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= fi -done - done -IFS=$as_save_IFS - -fi -fi -OBJDUMP=$ac_cv_prog_OBJDUMP -if test -n "$OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 -$as_echo "$OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } fi - - +ac_prog_cxx_stdcxx=no +if test x$ac_prog_cxx_stdcxx = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5 +printf %s "checking for $CXX option to enable C++11 features... " >&6; } +if test ${ac_cv_prog_cxx_11+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cxx_11=no +ac_save_CXX=$CXX +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_cxx_conftest_cxx11_program +_ACEOF +for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA +do + CXX="$ac_save_CXX $ac_arg" + if ac_fn_cxx_try_compile "$LINENO" +then : + ac_cv_prog_cxx_cxx11=$ac_arg fi -if test -z "$ac_cv_prog_OBJDUMP"; then - ac_ct_OBJDUMP=$OBJDUMP - # Extract the first word of "objdump", so it can be a program name with args. -set dummy objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OBJDUMP"; then - ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cxx_cxx11" != "xno" && break +done +rm -f conftest.$ac_ext +CXX=$ac_save_CXX +fi + +if test "x$ac_cv_prog_cxx_cxx11" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cxx_cxx11" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5 +printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; } + CXX="$CXX $ac_cv_prog_cxx_cxx11" +fi + ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11 + ac_prog_cxx_stdcxx=cxx11 +fi +fi +if test x$ac_prog_cxx_stdcxx = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5 +printf %s "checking for $CXX option to enable C++98 features... " >&6; } +if test ${ac_cv_prog_cxx_98+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_cv_prog_cxx_98=no +ac_save_CXX=$CXX +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_cxx_conftest_cxx98_program +_ACEOF +for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OBJDUMP="objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi + CXX="$ac_save_CXX $ac_arg" + if ac_fn_cxx_try_compile "$LINENO" +then : + ac_cv_prog_cxx_cxx98=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam + test "x$ac_cv_prog_cxx_cxx98" != "xno" && break done - done -IFS=$as_save_IFS - +rm -f conftest.$ac_ext +CXX=$ac_save_CXX fi + +if test "x$ac_cv_prog_cxx_cxx98" = xno +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +printf "%s\n" "unsupported" >&6; } +else $as_nop + if test "x$ac_cv_prog_cxx_cxx98" = x +then : + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +printf "%s\n" "none needed" >&6; } +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5 +printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; } + CXX="$CXX $ac_cv_prog_cxx_cxx98" fi -ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP -if test -n "$ac_ct_OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 -$as_echo "$ac_ct_OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98 + ac_prog_cxx_stdcxx=cxx98 fi - - if test "x$ac_ct_OBJDUMP" = x; then - OBJDUMP="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OBJDUMP=$ac_ct_OBJDUMP - fi -else - OBJDUMP="$ac_cv_prog_OBJDUMP" fi - ;; -esac - -test -z "$AS" && AS=as - - - - - -test -z "$DLLTOOL" && DLLTOOL=dlltool +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o 
conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +depcc="$CXX" am_compiler_list= +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +printf %s "checking dependency style of $depcc... " >&6; } +if test ${am_cv_CXX_dependencies_compiler_type+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. 
+ : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf -test -z "$OBJDUMP" && OBJDUMP=objdump + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. 
+ # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +printf %s "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +printf "%s\n" "no, using $LN_S" >&6; } +fi -case `pwd` in - *\ * | *\ *) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 -$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +printf %s "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } +set x ${MAKE-make} +ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval test \${ac_cv_prog_make_${ac_make}_set+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; esac - - - -macro_version='2.4.6' -macro_revision='2.4.6' - - - - - - - - - - - - - -ltmain=$ac_aux_dir/ltmain.sh - -# Backslashify metacharacters that are still active within -# double-quoted strings. -sed_quote_subst='s/\(["`$\\]\)/\\\1/g' - -# Same as above, but do not quote variable references. -double_quote_subst='s/\(["`\\]\)/\\\1/g' - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to delay expansion of an escaped single quote. -delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' - -ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 -$as_echo_n "checking how to print strings... " >&6; } -# Test print first, because it will be a builtin if present. 
-if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' -elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='printf %s\n' +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + SET_MAKE= else - # Use this function as a fallback that always works. - func_fallback_echo () - { - eval 'cat <<_LTECHO_EOF -$1 -_LTECHO_EOF' - } - ECHO='func_fallback_echo' + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" fi -# func_echo_all arg... -# Invoke $ECHO with all args, space-separated. -func_echo_all () -{ - $ECHO "" -} +for ac_prog in sshd +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_path_SSHD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $SSHD in + [\\/]* | ?:[\\/]*) + ac_cv_path_SSHD="$SSHD" # Let the user override the test with a path. 
+ ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/libexec$PATH_SEPARATOR /usr/sbin$PATH_SEPARATOR/usr/etc$PATH_SEPARATOR/etc +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_path_SSHD="$as_dir$ac_word$ac_exec_ext" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS -case $ECHO in - printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 -$as_echo "printf" >&6; } ;; - print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 -$as_echo "print -r" >&6; } ;; - *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 -$as_echo "cat" >&6; } ;; + ;; esac +fi +SSHD=$ac_cv_path_SSHD +if test -n "$SSHD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $SSHD" >&5 +printf "%s\n" "$SSHD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + test -n "$SSHD" && break +done + if test -n "$SSHD"; then + SSHD_TRUE= + SSHD_FALSE='#' +else + SSHD_TRUE='#' + SSHD_FALSE= +fi +enable_win32_dll=yes - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 -$as_echo_n "checking for a sed that does not truncate output... " >&6; } -if ${ac_cv_path_SED+:} false; then : - $as_echo_n "(cached) " >&6 +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}as", so it can be a program name with args. +set dummy ${ac_tool_prefix}as; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_AS+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$AS"; then + ac_cv_prog_AS="$AS" # Let the user override the test. else - ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ - for ac_i in 1 2 3 4 5 6 7; do - ac_script="$ac_script$as_nl$ac_script" - done - echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed - { ac_script=; unset ac_script;} - if test -z "$SED"; then - ac_path_SED_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in sed gsed; do + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_SED" || continue -# Check for GNU ac_path_SED and select it if it is found. 
- # Check for GNU $ac_path_SED -case `"$ac_path_SED" --version 2>&1` in -*GNU*) - ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo '' >> "conftest.nl" - "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_SED_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_SED="$ac_path_SED" - ac_path_SED_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_SED_found && break 3 - done - done + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_AS="${ac_tool_prefix}as" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done IFS=$as_save_IFS - if test -z "$ac_cv_path_SED"; then - as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 - fi + +fi +fi +AS=$ac_cv_prog_AS +if test -n "$AS"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AS" >&5 +printf "%s\n" "$AS" >&6; } else - ac_cv_path_SED=$SED + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 -$as_echo "$ac_cv_path_SED" >&6; } - SED="$ac_cv_path_SED" - rm -f conftest.sed - -test -z "$SED" && SED=sed -Xsed="$SED -e 1s/^X//" - +fi +if test -z "$ac_cv_prog_AS"; then + ac_ct_AS=$AS + # Extract the first word of "as", so it can be a program name with args. 
+set dummy as; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_AS+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_AS"; then + ac_cv_prog_ac_ct_AS="$ac_ct_AS" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AS="as" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_AS=$ac_cv_prog_ac_ct_AS +if test -n "$ac_ct_AS"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AS" >&5 +printf "%s\n" "$ac_ct_AS" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + if test "x$ac_ct_AS" = x; then + AS="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AS=$ac_ct_AS + fi +else + AS="$ac_cv_prog_AS" +fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +printf "%s\n" "$DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +printf "%s\n" "$ac_ct_DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 -$as_echo_n "checking for fgrep... " >&6; } -if ${ac_cv_path_FGREP+:} false; then : - $as_echo_n "(cached) " >&6 + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. 
else - if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 - then ac_cv_path_FGREP="$GREP -F" - else - if test -z "$FGREP"; then - ac_path_FGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in fgrep; do + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_FGREP" || continue -# Check for GNU ac_path_FGREP and select it if it is found. - # Check for GNU $ac_path_FGREP -case `"$ac_path_FGREP" --version 2>&1` in -*GNU*) - ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'FGREP' >> "conftest.nl" - "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_FGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_FGREP="$ac_path_FGREP" - ac_path_FGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac +IFS=$as_save_IFS - $ac_path_FGREP_found && break 3 - done - done +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { printf 
"%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +printf "%s\n" "$OBJDUMP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done IFS=$as_save_IFS - if test -z "$ac_cv_path_FGREP"; then - as_fn_error $? 
"no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +printf "%s\n" "$ac_ct_OBJDUMP" >&6; } else - ac_cv_path_FGREP=$FGREP + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - fi + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 -$as_echo "$ac_cv_path_FGREP" >&6; } - FGREP="$ac_cv_path_FGREP" + ;; +esac -test -z "$GREP" && GREP=grep +test -z "$AS" && AS=as +test -z "$DLLTOOL" && DLLTOOL=dlltool +test -z "$OBJDUMP" && OBJDUMP=objdump @@ -6287,110 +7361,17 @@ test -z "$GREP" && GREP=grep +case `pwd` in + *\ * | *\ *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac -# Check whether --with-gnu-ld was given. -if test "${with_gnu_ld+set}" = set; then : - withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes -else - with_gnu_ld=no -fi -ac_prog=ld -if test yes = "$GCC"; then - # Check if gcc -print-prog-name=ld gives a path. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 -$as_echo_n "checking for ld used by $CC... 
" >&6; } - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return, which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [\\/]* | ?:[\\/]*) - re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` - while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do - ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD=$ac_prog - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test yes = "$with_gnu_ld"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 -$as_echo_n "checking for GNU ld... " >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 -$as_echo_n "checking for non-GNU ld... " >&6; } -fi -if ${lt_cv_path_LD+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$LD"; then - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD=$ac_dir/$ac_prog - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. - case `"$lt_cv_path_LD" -v 2>&1 &5 -$as_echo "$LD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 -$as_echo_n "checking if the linker ($LD) is GNU ld... 
" >&6; } -if ${lt_cv_prog_gnu_ld+:} false; then : - $as_echo_n "(cached) " >&6 -else - # I'd rather use --version here, but apparently some GNU lds only accept -v. -case `$LD -v 2>&1 &5 -$as_echo "$lt_cv_prog_gnu_ld" >&6; } -with_gnu_ld=$lt_cv_prog_gnu_ld @@ -6400,385 +7381,162 @@ with_gnu_ld=$lt_cv_prog_gnu_ld -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 -$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } -if ${lt_cv_path_NM+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM=$NM -else - lt_nm_to_check=${ac_tool_prefix}nm - if test -n "$ac_tool_prefix" && test "$build" = "$host"; then - lt_nm_to_check="$lt_nm_to_check nm" - fi - for lt_tmp_nm in $lt_nm_to_check; do - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - tmp_nm=$ac_dir/$lt_tmp_nm - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then - # Check to see if the nm accepts a BSD-compat flag. 
- # Adding the 'sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty - case $build_os in - mingw*) lt_bad_file=conftest.nm/nofile ;; - *) lt_bad_file=/dev/null ;; - esac - case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in - *$lt_bad_file* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break 2 - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break 2 - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - ;; - esac - fi - done - IFS=$lt_save_ifs - done - : ${lt_cv_path_NM=no} -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 -$as_echo "$lt_cv_path_NM" >&6; } -if test no != "$lt_cv_path_NM"; then - NM=$lt_cv_path_NM -else - # Didn't find any BSD compatible name lister, look for dumpbin. - if test -n "$DUMPBIN"; then : - # Let the user override the test. - else - if test -n "$ac_tool_prefix"; then - for ac_prog in dumpbin "link -dump" - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DUMPBIN+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DUMPBIN"; then - ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -DUMPBIN=$ac_cv_prog_DUMPBIN -if test -n "$DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 -$as_echo "$DUMPBIN" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - test -n "$DUMPBIN" && break - done -fi -if test -z "$DUMPBIN"; then - ac_ct_DUMPBIN=$DUMPBIN - for ac_prog in dumpbin "link -dump" -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DUMPBIN"; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN -if test -n "$ac_ct_DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 -$as_echo "$ac_ct_DUMPBIN" >&6; } +ltmain=$ac_aux_dir/ltmain.sh + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. 
+double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +printf %s "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' fi +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. 
+func_echo_all () +{ + $ECHO "" +} - test -n "$ac_ct_DUMPBIN" && break -done - - if test "x$ac_ct_DUMPBIN" = x; then - DUMPBIN=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; +case $ECHO in + printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +printf "%s\n" "printf" >&6; } ;; + print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +printf "%s\n" "print -r" >&6; } ;; + *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +printf "%s\n" "cat" >&6; } ;; esac - DUMPBIN=$ac_ct_DUMPBIN - fi -fi - case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in - *COFF*) - DUMPBIN="$DUMPBIN -symbols -headers" - ;; - *) - DUMPBIN=: - ;; - esac - fi - if test : != "$DUMPBIN"; then - NM=$DUMPBIN - fi -fi -test -z "$NM" && NM=nm -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 -$as_echo_n "checking the name lister ($NM) interface... 
" >&6; } -if ${lt_cv_nm_interface+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_nm_interface="BSD nm" - echo "int some_variable = 0;" > conftest.$ac_ext - (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) - (eval "$ac_compile" 2>conftest.err) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) - (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: output\"" >&5) - cat conftest.out >&5 - if $GREP 'External.*some_variable' conftest.out > /dev/null; then - lt_cv_nm_interface="MS dumpbin" - fi - rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 -$as_echo "$lt_cv_nm_interface" >&6; } -# find the maximum length of command line arguments -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 -$as_echo_n "checking the maximum length of command line arguments... " >&6; } -if ${lt_cv_sys_max_cmd_len+:} false; then : - $as_echo_n "(cached) " >&6 -else - i=0 - teststring=ABCD - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - cygwin* | mingw* | cegcc*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. 
- # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - - mint*) - # On MiNT this can take a long time and run out of memory. - lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - os2*) - # The test takes a long time on OS/2. - lt_cv_sys_max_cmd_len=8192 - ;; - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. 
- lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len" && \ - test undefined != "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - # Make teststring a little bigger before we do anything with it. - # a 1K string should be a reasonable start. - for i in 1 2 3 4 5 6 7 8; do - teststring=$teststring$teststring - done - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. - while { test X`env echo "$teststring$teststring" 2>/dev/null` \ - = "X$teststring$teststring"; } >/dev/null 2>&1 && - test 17 != "$i" # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - # Only check the string length outside the loop. - lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` - teststring= - # Add a significant safety factor because C++ compilers can tack on - # massive amounts of additional arguments before passing them to the - # linker. It appears as though 1/2 is a usable value. - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +printf %s "checking for a sed that does not truncate output... 
" >&6; } +if test ${ac_cv_path_SED+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; esac + for ac_prog in sed gsed + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac -fi - -if test -n "$lt_cv_sys_max_cmd_len"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 -$as_echo "$lt_cv_sys_max_cmd_len" >&6; } + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if 
test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 -$as_echo "none" >&6; } + ac_cv_path_SED=$SED fi -max_cmd_len=$lt_cv_sys_max_cmd_len - - - - - -: ${CP="cp -f"} -: ${MV="mv -f"} -: ${RM="rm -f"} - -if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - lt_unset=unset -else - lt_unset=false fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +printf "%s\n" "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" -# test EBCDIC or ASCII -case `echo X|tr X '\101'` in - A) # ASCII based system - # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr - lt_SP2NL='tr \040 \012' - lt_NL2SP='tr \015\012 \040\040' - ;; - *) # EBCDIC based system - lt_SP2NL='tr \100 \n' - lt_NL2SP='tr \r\n \100\100' - ;; -esac - @@ -6786,451 +7544,346 @@ esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +printf %s "checking for grep that handles long lines and -e... " >&6; } +if test ${ac_cv_path_GREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in grep ggrep + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -if ${lt_cv_to_host_file_cmd+:} false; then : - $as_echo_n "(cached) " >&6 + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi else - case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 - ;; - esac - ;; - *-*-cygwin* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin - ;; - esac - ;; - * ) # unhandled hosts (and "normal" native builds) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; -esac + ac_cv_path_GREP=$GREP +fi fi - -to_host_file_cmd=$lt_cv_to_host_file_cmd -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -$as_echo "$lt_cv_to_host_file_cmd" >&6; } - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -if ${lt_cv_to_tool_file_cmd+:} false; then : - $as_echo_n "(cached) " >&6 -else - #assume ordinary cross tools, or native build. -lt_cv_to_tool_file_cmd=func_convert_file_noop -case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 - ;; - esac - ;; -esac - -fi - -to_tool_file_cmd=$lt_cv_to_tool_file_cmd -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -$as_echo "$lt_cv_to_tool_file_cmd" >&6; } - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 -$as_echo_n "checking for $LD option to reload object files... 
" >&6; } -if ${lt_cv_ld_reload_flag+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_reload_flag='-r' -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 -$as_echo "$lt_cv_ld_reload_flag" >&6; } -reload_flag=$lt_cv_ld_reload_flag -case $reload_flag in -"" | " "*) ;; -*) reload_flag=" $reload_flag" ;; -esac -reload_cmds='$LD$reload_flag -o $output$reload_objs' -case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - if test yes != "$GCC"; then - reload_cmds=false - fi - ;; - darwin*) - if test yes = "$GCC"; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' - else - reload_cmds='$LD$reload_flag -o $output$reload_objs' - fi - ;; -esac - - - - - +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +printf "%s\n" "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. -set dummy ${ac_tool_prefix}objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OBJDUMP"; then - ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +printf %s "checking for egrep... " >&6; } +if test ${ac_cv_path_EGREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in egrep + do for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done + ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break done -IFS=$as_save_IFS + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac -fi -fi -OBJDUMP=$ac_cv_prog_OBJDUMP -if test -n "$OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 -$as_echo "$OBJDUMP" >&6; } + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + ac_cv_path_EGREP=$EGREP fi - + fi fi -if test -z "$ac_cv_prog_OBJDUMP"; then - ac_ct_OBJDUMP=$OBJDUMP - # Extract the first word of "objdump", so it can be a program name with args. -set dummy objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OBJDUMP"; then - ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +printf "%s\n" "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +printf %s "checking for fgrep... " >&6; } +if test ${ac_cv_path_FGREP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in fgrep + do for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OBJDUMP="objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done + ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. 
+ # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + printf %s 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + printf "%s\n" 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break done -IFS=$as_save_IFS + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac -fi -fi -ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP -if test -n "$ac_ct_OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 -$as_echo "$ac_ct_OBJDUMP" >&6; } + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? 
"no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + ac_cv_path_FGREP=$FGREP fi - if test "x$ac_ct_OBJDUMP" = x; then - OBJDUMP="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OBJDUMP=$ac_ct_OBJDUMP - fi -else - OBJDUMP="$ac_cv_prog_OBJDUMP" + fi fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +printf "%s\n" "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" -test -z "$OBJDUMP" && OBJDUMP=objdump +test -z "$GREP" && GREP=grep -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 -$as_echo_n "checking how to recognize dependent libraries... " >&6; } -if ${lt_cv_deplibs_check_method+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_file_magic_cmd='$MAGIC_CMD' -lt_cv_file_magic_test_file= -lt_cv_deplibs_check_method='unknown' -# Need to set the preceding variable on all platforms that support -# interlibrary dependencies. -# 'none' -- dependencies not supported. -# 'unknown' -- same as none, but documents that we really don't know. -# 'pass_all' -- all dependencies passed with no checks. -# 'test_compile' -- check by making test program. -# 'file_magic [[regex]]' -- check by looking for files in library path -# that responds to the $file_magic_cmd with a given extended regex. -# If you have 'file' or equivalent on your system and you're not sure -# whether 'pass_all' will *always* work, you probably want this one. 
-case $host_os in -aix[4-9]*) - lt_cv_deplibs_check_method=pass_all - ;; -beos*) - lt_cv_deplibs_check_method=pass_all - ;; -bsdi[45]*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' - lt_cv_file_magic_cmd='/usr/bin/file -L' - lt_cv_file_magic_test_file=/shlib/libc.so - ;; -cygwin*) - # func_win32_libid is a shell function defined in ltmain.sh - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - ;; -mingw* | pw32*) - # Base MSYS/MinGW do not provide the 'file' command needed by - # func_win32_libid shell function, so use a weaker test based on 'objdump', - # unless we find 'file', for example because we are cross-compiling. - if ( file / ) >/dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else - # Keep this pattern in sync with the one in func_win32_libid. - lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -cegcc*) - # use the weaker test based on 'objdump'. See mingw*. - lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' - lt_cv_file_magic_cmd='$OBJDUMP -f' - ;; -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. 
- lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -haiku*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -interix[3-9]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be glibc/ELF. 
-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; -*nto* | *qnx*) - lt_cv_deplibs_check_method=pass_all - ;; -openbsd* | bitrig*) - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - fi - ;; -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; +# Check whether --with-gnu-ld was given. 
+if test ${with_gnu_ld+y} +then : + withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +else $as_nop + with_gnu_ld=no +fi -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" - lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - lt_cv_deplibs_check_method=pass_all +ac_prog=ld +if test yes = "$GCC"; then + # Check if gcc -print-prog-name=ld gives a path. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +printf %s "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return, which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD=$ac_prog + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld ;; - pc) - lt_cv_deplibs_check_method=pass_all + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown ;; esac - ;; +elif test yes = "$with_gnu_ld"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +printf %s "checking for GNU ld... 
" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +printf %s "checking for non-GNU ld... " >&6; } +fi +if test ${lt_cv_path_LD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$LD"; then + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD=$ac_dir/$ac_prog + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +printf "%s\n" "$LD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +if test ${lt_cv_prog_gnu_ld+y} +then : + printf %s "(cached) " >&6 +else $as_nop + # I'd rather use --version here, but apparently some GNU lds only accept -v. 
+case `$LD -v 2>&1 &5 -$as_echo "$lt_cv_deplibs_check_method" >&6; } - -file_magic_glob= -want_nocaseglob=no -if test "$build" = "$host"; then - case $host_os in - mingw* | pw32*) - if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then - want_nocaseglob=yes - else - file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` - fi - ;; - esac fi - -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - - - - - - - - - - - +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5 +printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld @@ -7240,166 +7893,97 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DLLTOOL"; then - ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if test ${lt_cv_path_NM+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM=$NM else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 + lt_nm_to_check=${ac_tool_prefix}nm + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" fi -done - done -IFS=$as_save_IFS - -fi -fi -DLLTOOL=$ac_cv_prog_DLLTOOL -if test -n "$DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -$as_echo "$DLLTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_DLLTOOL"; then - ac_ct_DLLTOOL=$DLLTOOL - # Extract the first word of "dlltool", so it can be a program name with args. -set dummy dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DLLTOOL"; then - ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DLLTOOL="dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + tmp_nm=$ac_dir/$lt_tmp_nm + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then + # Check to see if the nm accepts a BSD-compat flag. 
+ # Adding the 'sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty + case $build_os in + mingw*) lt_bad_file=conftest.nm/nofile ;; + *) lt_bad_file=/dev/null ;; + esac + case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in + *$lt_bad_file* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break 2 + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break 2 + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS=$lt_save_ifs done -IFS=$as_save_IFS - + : ${lt_cv_path_NM=no} fi fi -ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -if test -n "$ac_ct_DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -$as_echo "$ac_ct_DLLTOOL" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +printf "%s\n" "$lt_cv_path_NM" >&6; } +if test no != "$lt_cv_path_NM"; then + NM=$lt_cv_path_NM else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_DLLTOOL" = x; then - DLLTOOL="false" + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. 
else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DLLTOOL=$ac_ct_DLLTOOL - fi -else - DLLTOOL="$ac_cv_prog_DLLTOOL" -fi - -test -z "$DLLTOOL" && DLLTOOL=dlltool - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_sharedlib_from_linklib_cmd='unknown' - -case $host_os in -cygwin* | mingw* | pw32* | cegcc*) - # two different shell functions defined in ltmain.sh; - # decide which one to use based on capabilities of $DLLTOOL - case `$DLLTOOL --help 2>&1` in - *--identify-strict*) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib - ;; - *) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback - ;; - esac - ;; -*) - # fallback: assume linklib IS sharedlib - lt_cv_sharedlib_from_linklib_cmd=$ECHO - ;; -esac - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO - - - - - - - -if test -n "$ac_tool_prefix"; then - for ac_prog in ar + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_AR+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AR"; then - ac_cv_prog_AR="$AR" # Let the user override the test. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DUMPBIN+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -7408,42 +7992,47 @@ IFS=$as_save_IFS fi fi -AR=$ac_cv_prog_AR -if test -n "$AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 -$as_echo "$AR" >&6; } +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +printf "%s\n" "$DUMPBIN" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - test -n "$AR" && break + test -n "$DUMPBIN" && break done fi -if test -z "$AR"; then - ac_ct_AR=$AR - for ac_prog in ar +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_AR+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_AR"; then - ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DUMPBIN+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -7452,123 +8041,398 @@ IFS=$as_save_IFS fi fi -ac_ct_AR=$ac_cv_prog_ac_ct_AR -if test -n "$ac_ct_AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 -$as_echo "$ac_ct_AR" >&6; } +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +printf "%s\n" "$ac_ct_DUMPBIN" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - test -n "$ac_ct_AR" && break + test -n "$ac_ct_DUMPBIN" && break done - if test "x$ac_ct_AR" = x; then - AR="false" + if test "x$ac_ct_DUMPBIN" = x; 
then + DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac - AR=$ac_ct_AR + DUMPBIN=$ac_ct_DUMPBIN fi fi -: ${AR=ar} -: ${AR_FLAGS=cru} - - + case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols -headers" + ;; + *) + DUMPBIN=: + ;; + esac + fi + if test : != "$DUMPBIN"; then + NM=$DUMPBIN + fi +fi +test -z "$NM" && NM=nm +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +printf %s "checking the name lister ($NM) interface... " >&6; } +if test ${lt_cv_nm_interface+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +printf "%s\n" "$lt_cv_nm_interface" >&6; } +# find the maximum length of command line arguments +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +printf %s "checking the maximum length of command line arguments... 
" >&6; } +if test ${lt_cv_sys_max_cmd_len+y} +then : + printf %s "(cached) " >&6 +else $as_nop + i=0 + teststring=ABCD -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -$as_echo_n "checking for archiver @FILE support... " >&6; } -if ${lt_cv_ar_at_file+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ar_at_file=no - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; -int -main () -{ + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - echo conftest.$ac_objext > conftest.lst - lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' - { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 - (eval $lt_ar_try) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if test 0 -eq "$ac_status"; then - # Ensure the archiver fails upon bogus file names. - rm -f conftest.$ac_objext libconftest.a - { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 - (eval $lt_ar_try) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if test 0 -ne "$ac_status"; then - lt_cv_ar_at_file=@ - fi - fi - rm -f conftest.* libconftest.a + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. 
+ # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -$as_echo "$lt_cv_ar_at_file" >&6; } + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; -if test no = "$lt_cv_ar_at_file"; then - archiver_list_spec= + bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test X`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test 17 != "$i" # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n "$lt_cv_sys_max_cmd_len"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; } else - archiver_list_spec=$lt_cv_ar_at_file + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5 +printf "%s\n" "none" >&6; } fi +max_cmd_len=$lt_cv_sys_max_cmd_len +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +printf %s "checking how to convert $build file names to $host format... 
" >&6; } +if test ${lt_cv_to_host_file_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +printf %s "checking how to convert $build file names to toolchain format... " >&6; } +if test ${lt_cv_to_tool_file_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +printf %s "checking for $LD option to reload object files... 
" >&6; } +if test ${lt_cv_ld_reload_flag+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_reload_flag='-r' +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +printf "%s\n" "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test yes != "$GCC"; then + reload_cmds=false + fi + ;; + darwin*) + if test yes = "$GCC"; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
+ case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -7577,38 +8441,43 @@ IFS=$as_save_IFS fi fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +printf "%s\n" "$OBJDUMP" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_ac_ct_OBJDUMP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 break 2 fi done @@ -7617,180 +8486,258 @@ IFS=$as_save_IFS fi fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +printf "%s\n" "$ac_ct_OBJDUMP" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - if test "x$ac_ct_STRIP" = x; then - STRIP=":" + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac - 
STRIP=$ac_ct_STRIP + OBJDUMP=$ac_ct_OBJDUMP fi else - STRIP="$ac_cv_prog_STRIP" + OBJDUMP="$ac_cv_prog_OBJDUMP" fi -test -z "$STRIP" && STRIP=: +test -z "$OBJDUMP" && OBJDUMP=objdump -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +printf %s "checking how to recognize dependent libraries... " >&6; } +if test ${lt_cv_deplibs_check_method+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# 'unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# that responds to the $file_magic_cmd with a given extended regex. 
+# If you have 'file' or equivalent on your system and you're not sure +# whether 'pass_all' will *always* work, you probably want this one. -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; +beos*) + lt_cv_deplibs_check_method=pass_all + ;; -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB + # Keep this pattern in sync with the one in func_win32_libid. 
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - -test -z "$RANLIB" && RANLIB=: - - + ;; +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; -if test -n "$RANLIB"; then - case $host_os in - bitrig* | openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? 
shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" -fi - -case $host_os in - darwin*) - lock_old_archive_extraction=yes ;; - *) - lock_old_archive_extraction=no ;; -esac - - - - - + ;; +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; +openbsd* | bitrig*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; 
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +os2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +printf "%s\n" "$lt_cv_deplibs_check_method" >&6; } +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown @@ -7809,304 +8756,271 @@ esac -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} -# Allow CC to be a program name with arguments. -compiler=$CC -# Check for command to grab the raw symbol name followed by C symbol from nm. 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 -$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } -if ${lt_cv_sys_global_symbol_pipe+:} false; then : - $as_echo_n "(cached) " >&6 +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[BCDEGRST]' +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +printf "%s\n" "$DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -# Regexp to match symbols that can be accessed directly from C. -sympat='\([_A-Za-z][_A-Za-z0-9]*\)' -# Define system-specific variables. 
-case $host_os in -aix*) - symcode='[BCDT]' - ;; -cygwin* | mingw* | pw32* | cegcc*) - symcode='[ABCDGISTW]' - ;; -hpux*) - if test ia64 = "$host_cpu"; then - symcode='[ABCDEGRST]' +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_DLLTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 fi - ;; -irix* | nonstopux*) - symcode='[BCDEGRST]' - ;; -osf*) - symcode='[BCDEGQRST]' - ;; -solaris*) - symcode='[BDRT]' - ;; -sco3.2v5*) - symcode='[DT]' - ;; -sysv4.2uw2*) - symcode='[DT]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[ABDT]' - ;; -sysv4) - symcode='[DFNSTU]' - ;; -esac +done + done +IFS=$as_save_IFS -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[ABCDGIRSTW]' ;; -esac +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +printf "%s\n" "$ac_ct_DLLTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Gets list of data symbols to import. 
- lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" - # Adjust the below global symbol transforms to fixup imported variables. - lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" - lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" - lt_c_name_lib_hook="\ - -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ - -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi else - # Disable hooks by default. - lt_cv_sys_global_symbol_to_import= - lt_cdecl_hook= - lt_c_name_hook= - lt_c_name_lib_hook= + DLLTOOL="$ac_cv_prog_DLLTOOL" fi -# Transform an extracted symbol line into a proper C declaration. -# Some systems (esp. on ia64) link data and code symbols differently, -# so use this general approach. -lt_cv_sys_global_symbol_to_cdecl="sed -n"\ -$lt_cdecl_hook\ -" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" +test -z "$DLLTOOL" && DLLTOOL=dlltool -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ -$lt_c_name_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" -# Transform an extracted symbol line into symbol name with lib prefix and -# symbol address. 
-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ -$lt_c_name_lib_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + + + + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +printf %s "checking how to associate runtime and link libraries... " >&6; } +if test ${lt_cv_sharedlib_from_linklib_cmd+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh; + # decide which one to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac -# Try without a prefix underscore, then with it. -for ac_symprfx in "" "_"; do +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - # Write the raw and C identifiers. - if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Fake it for dumpbin and say T for any non-static function, - # D for any global variable and I for any imported variable. 
- # Also find C++ and __fastcall symbols from MSVC++, - # which start with @ or ?. - lt_cv_sys_global_symbol_pipe="$AWK '"\ -" {last_section=section; section=\$ 3};"\ -" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ -" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ -" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ -" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ -" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ -" \$ 0!~/External *\|/{next};"\ -" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ -" {if(hide[section]) next};"\ -" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ -" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ -" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ -" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ -" ' prfx=^$ac_symprfx" - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi - lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - # Check to see that the pipe works correctly. - pipe_works=no - rm -f conftest* - cat > conftest.$ac_ext <<_LT_EOF -#ifdef __cplusplus -extern "C" { -#endif -char nm_test_var; -void nm_test_func(void); -void nm_test_func(void){} -#ifdef __cplusplus -} -#endif -int main(){nm_test_var='a';nm_test_func();return(0);} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - # Now try to grab the symbols. - nlist=conftest.nm - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 - (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - # Make sure that we snagged all the symbols we need. - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE -/* DATA imports from DLLs on WIN32 can't be const, because runtime - relocations are performed -- see ld's documentation on pseudo-relocs. */ -# define LT_DLSYM_CONST -#elif defined __osf__ -/* This system does not cope well with relocations in const data. */ -# define LT_DLSYM_CONST -#else -# define LT_DLSYM_CONST const -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -_LT_EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - - cat <<_LT_EOF >> conftest.$ac_ext - -/* The mapping between symbol names and symbols. */ -LT_DLSYM_CONST struct { - const char *name; - void *address; -} -lt__PROGRAM__LTX_preloaded_symbols[] = -{ - { "@PROGRAM@", (void *) 0 }, -_LT_EOF - $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext - cat <<\_LT_EOF >> conftest.$ac_ext - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt__PROGRAM__LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif -_LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext - lt_globsym_save_LIBS=$LIBS - lt_globsym_save_CFLAGS=$CFLAGS - LIBS=conftstm.$ac_objext - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s conftest$ac_exeext; then - pipe_works=yes - fi - LIBS=$lt_globsym_save_LIBS - CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi - else - echo "cannot find nm_test_var in $nlist" >&5 - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 - fi - else - echo "$progname: failed program was:" >&5 - cat conftest.$ac_ext >&5 - fi - rm -rf conftest* conftst* - # Do not use the global_symbol_pipe unless it works. - if test yes = "$pipe_works"; then - break - else - lt_cv_sys_global_symbol_pipe= +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_AR+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 fi done + done +IFS=$as_save_IFS fi - -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 -$as_echo "failed" >&6; } +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +printf "%s\n" "$AR" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 -$as_echo "ok" >&6; } -fi - -# Response file support. -if test "$lt_cv_nm_interface" = "MS dumpbin"; then - nm_file_list_spec='@' -elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then - nm_file_list_spec='@' + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_AR+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +printf "%s\n" "$ac_ct_AR" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + test -n "$ac_ct_AR" && break +done + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi +: ${AR=ar} +: ${AR_FLAGS=cr} @@ -8118,1231 +9032,652 @@ fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +printf %s "checking for archiver @FILE support... " >&6; } +if test ${lt_cv_ar_at_file+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int +main (void) +{ + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -eq "$ac_status"; then + # Ensure the archiver fails upon bogus file names. 
+ rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -ne "$ac_status"; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +printf "%s\n" "$lt_cv_ar_at_file" >&6; } - - - - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -$as_echo_n "checking for sysroot... " >&6; } - -# Check whether --with-sysroot was given. -if test "${with_sysroot+set}" = set; then : - withval=$with_sysroot; +if test no = "$lt_cv_ar_at_file"; then + archiver_list_spec= else - with_sysroot=no + archiver_list_spec=$lt_cv_ar_at_file fi -lt_sysroot= -case $with_sysroot in #( - yes) - if test yes = "$GCC"; then - lt_sysroot=`$CC --print-sysroot 2>/dev/null` - fi - ;; #( - /*) - lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` - ;; #( - no|'') - ;; #( - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 -$as_echo "$with_sysroot" >&6; } - as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 - ;; -esac - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -$as_echo "${lt_sysroot:-no}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 -$as_echo_n "checking for a working dd... " >&6; } -if ${ac_cv_path_lt_DD+:} false; then : - $as_echo_n "(cached) " >&6 +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else - printf 0123456789abcdef0123456789abcdef >conftest.i -cat conftest.i conftest.i >conftest2.i -: ${lt_DD:=$DD} -if test -z "$lt_DD"; then - ac_path_lt_DD_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in dd; do + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_lt_DD" || continue -if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then - cmp -s conftest.i conftest.out \ - && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: -fi - $ac_path_lt_DD_found && break 3 - done - done + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done IFS=$as_save_IFS - if test -z "$ac_cv_path_lt_DD"; then - : - fi -else - ac_cv_path_lt_DD=$lt_DD -fi -rm -f conftest.i conftest2.i conftest.out fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 -$as_echo "$ac_cv_path_lt_DD" >&6; } +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +printf "%s\n" "$STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 -$as_echo_n "checking how to truncate binary pipes... 
" >&6; } -if ${lt_cv_truncate_bin+:} false; then : - $as_echo_n "(cached) " >&6 -else - printf 0123456789abcdef0123456789abcdef >conftest.i -cat conftest.i conftest.i >conftest2.i -lt_cv_truncate_bin= -if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then - cmp -s conftest.i conftest.out \ - && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi -rm -f conftest.i conftest2.i conftest.out -test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_STRIP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +printf "%s\n" "$ac_ct_STRIP" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 -$as_echo "$lt_cv_truncate_bin" >&6; } + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not 
prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi +test -z "$STRIP" && STRIP=: -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. -func_cc_basename () -{ - for cc_temp in $*""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac - done - func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` -} -# Check whether --enable-libtool-lock was given. -if test "${enable_libtool_lock+set}" = set; then : - enableval=$enable_libtool_lock; +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_RANLIB+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +printf "%s\n" "$RANLIB" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -test no = "$enable_libtool_lock" || enable_libtool_lock=yes -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out what ABI is being produced by ac_compile, and set mode - # options accordingly. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE=32 - ;; - *ELF-64*) - HPUX_IA64_MODE=64 - ;; - esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo '#line '$LINENO' "configure"' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then - if test yes = "$lt_cv_prog_gnu_ld"; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_RANLIB+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 fi - rm -rf conftest* - ;; +done + done +IFS=$as_save_IFS -mips64*-*linux*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo '#line '$LINENO' "configure"' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then - emul=elf - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - emul="${emul}32" - ;; - *64-bit*) - emul="${emul}64" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *MSB*) - emul="${emul}btsmip" - ;; - *LSB*) - emul="${emul}ltsmip" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *N32*) - emul="${emul}n32" - ;; - esac - LD="${LD-ld} -m $emul" +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +printf "%s\n" "$ac_ct_RANLIB" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB fi - rm -rf conftest* - ;; +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ -s390*-*linux*|s390*-*tpf*|sparc*-*linux*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. Note that the listed cases only cover the - # situations where additional linker options are needed (such as when - # doing 32-bit compilation for a host where ld defaults to 64-bit, or - # vice versa); the common cases where no linker options are needed do - # not appear in the list. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - case `/usr/bin/file conftest.o` in - *x86-64*) - LD="${LD-ld} -m elf32_x86_64" - ;; - *) - LD="${LD-ld} -m elf_i386" - ;; - esac - ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*|s390*-*tpf*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS=$CFLAGS - CFLAGS="$CFLAGS -belf" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 -$as_echo_n "checking whether the C compiler needs -belf... " >&6; } -if ${lt_cv_cc_needs_belf+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -int -main () -{ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_cc_needs_belf=yes -else - lt_cv_cc_needs_belf=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + bitrig* | openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 -$as_echo "$lt_cv_cc_needs_belf" >&6; } - if test yes != "$lt_cv_cc_needs_belf"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS=$SAVE_CFLAGS - fi - ;; -*-*solaris*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) - case $host in - i?86-*-solaris*|x86_64-*-solaris*) - LD="${LD-ld} -m elf_x86_64" - ;; - sparc*-*-solaris*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
- if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then - LD=${LD-ld}_sol2 - fi - ;; - *) - if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then - LD="${LD-ld} -64" - fi - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; esac -need_locks=$enable_libtool_lock -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -set dummy ${ac_tool_prefix}mt; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$MANIFEST_TOOL"; then - ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -if test -n "$MANIFEST_TOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -$as_echo "$MANIFEST_TOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_MANIFEST_TOOL"; then - ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL - # Extract the first word of "mt", so it can be a program name with args. -set dummy mt; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_MANIFEST_TOOL"; then - ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -if test -n "$ac_ct_MANIFEST_TOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_MANIFEST_TOOL" = x; then - MANIFEST_TOOL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL - fi -else - MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -fi -test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -if ${lt_cv_path_mainfest_tool+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_path_mainfest_tool=no - echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 - $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out - cat conftest.err >&5 - if $GREP 'Manifest Tool' conftest.out > /dev/null; then - lt_cv_path_mainfest_tool=yes - fi - rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -$as_echo "$lt_cv_path_mainfest_tool" >&6; } -if test yes != "$lt_cv_path_mainfest_tool"; then - MANIFEST_TOOL=: -fi - case $host_os in - rhapsody* | darwin*) - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. -set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DSYMUTIL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DSYMUTIL"; then - ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -DSYMUTIL=$ac_cv_prog_DSYMUTIL -if test -n "$DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 -$as_echo "$DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_DSYMUTIL"; then - ac_ct_DSYMUTIL=$DSYMUTIL - # Extract the first word of "dsymutil", so it can be a program name with args. -set dummy dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DSYMUTIL"; then - ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL -if test -n "$ac_ct_DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 -$as_echo "$ac_ct_DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_DSYMUTIL" = x; then - DSYMUTIL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DSYMUTIL=$ac_ct_DSYMUTIL - fi -else - DSYMUTIL="$ac_cv_prog_DSYMUTIL" -fi - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. -set dummy ${ac_tool_prefix}nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_NMEDIT+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NMEDIT"; then - ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -NMEDIT=$ac_cv_prog_NMEDIT -if test -n "$NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 -$as_echo "$NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_NMEDIT"; then - ac_ct_NMEDIT=$NMEDIT - # Extract the first word of "nmedit", so it can be a program name with args. -set dummy nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_NMEDIT"; then - ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_NMEDIT="nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT -if test -n "$ac_ct_NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 -$as_echo "$ac_ct_NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_NMEDIT" = x; then - NMEDIT=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - NMEDIT=$ac_ct_NMEDIT - fi -else - NMEDIT="$ac_cv_prog_NMEDIT" -fi - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. -set dummy ${ac_tool_prefix}lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_LIPO+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$LIPO"; then - ac_cv_prog_LIPO="$LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_LIPO="${ac_tool_prefix}lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -LIPO=$ac_cv_prog_LIPO -if test -n "$LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 -$as_echo "$LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_LIPO"; then - ac_ct_LIPO=$LIPO - # Extract the first word of "lipo", so it can be a program name with args. -set dummy lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_LIPO+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_LIPO"; then - ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_LIPO="lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO -if test -n "$ac_ct_LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 -$as_echo "$ac_ct_LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_LIPO" = x; then - LIPO=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - LIPO=$ac_ct_LIPO - fi -else - LIPO="$ac_cv_prog_LIPO" -fi - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL"; then - ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OTOOL="${ac_tool_prefix}otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -OTOOL=$ac_cv_prog_OTOOL -if test -n "$OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 -$as_echo "$OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -fi -if test -z "$ac_cv_prog_OTOOL"; then - ac_ct_OTOOL=$OTOOL - # Extract the first word of "otool", so it can be a program name with args. -set dummy otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL"; then - ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OTOOL="otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL -if test -n "$ac_ct_OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 -$as_echo "$ac_ct_OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - if test "x$ac_ct_OTOOL" = x; then - OTOOL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL=$ac_ct_OTOOL - fi -else - OTOOL="$ac_cv_prog_OTOOL" -fi - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OTOOL64+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL64"; then - ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS -fi -fi -OTOOL64=$ac_cv_prog_OTOOL64 -if test -n "$OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 -$as_echo "$OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OTOOL64"; then - ac_ct_OTOOL64=$OTOOL64 - # Extract the first word of "otool64", so it can be a program name with args. -set dummy otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL64"; then - ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OTOOL64="otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 -if test -n "$ac_ct_OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 -$as_echo "$ac_ct_OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OTOOL64" = x; then - OTOOL64=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL64=$ac_ct_OTOOL64 - fi -else - OTOOL64="$ac_cv_prog_OTOOL64" -fi +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +# Allow CC to be a program name with arguments. +compiler=$CC +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +printf %s "checking command to parse $NM output from $compiler object... " >&6; } +if test ${lt_cv_sys_global_symbol_pipe+y} +then : + printf %s "(cached) " >&6 +else $as_nop +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' +# Define system-specific variables. 
+case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test ia64 = "$host_cpu"; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Gets list of data symbols to import. + lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" + # Adjust the below global symbol transforms to fixup imported variables. + lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" + lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" + lt_c_name_lib_hook="\ + -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ + -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +else + # Disable hooks by default. + lt_cv_sys_global_symbol_to_import= + lt_cdecl_hook= + lt_c_name_hook= + lt_c_name_lib_hook= +fi +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n"\ +$lt_cdecl_hook\ +" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ +$lt_c_name_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" +# Transform an extracted symbol line into symbol name with lib prefix and +# symbol address. 
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ +$lt_c_name_lib_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function, + # D for any global variable and I for any imported variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + # Check to see that the pipe works correctly. 
+ pipe_works=no + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&5 + if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&5 && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif +#ifdef __cplusplus +extern "C" { +#endif +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + cat <<_LT_EOF >> conftest.$ac_ext +/* The mapping between symbol names and symbols. 
*/ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 -$as_echo_n "checking for -single_module linker flag... " >&6; } -if ${lt_cv_apple_cc_single_mod+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_apple_cc_single_mod=no - if test -z "$LT_MULTI_MODULE"; then - # By default we will add the -single_module flag. You can override - # by either setting the environment variable LT_MULTI_MODULE - # non-empty at configure time, or by adding -multi_module to the - # link flags. - rm -rf libconftest.dylib* - echo "int foo(void){return 1;}" > conftest.c - echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ --dynamiclib -Wl,-single_module conftest.c" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ - -dynamiclib -Wl,-single_module conftest.c 2>conftest.err - _lt_result=$? - # If there is a non-empty error log, and "single_module" - # appears in it, assume the flag caused a linker warning - if test -s conftest.err && $GREP single_module conftest.err; then - cat conftest.err >&5 - # Otherwise, if the output was created with a 0 exit code from - # the compiler, it worked. - elif test -f libconftest.dylib && test 0 = "$_lt_result"; then - lt_cv_apple_cc_single_mod=yes +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS=conftstm.$ac_objext + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest$ac_exeext; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS else - cat conftest.err >&5 + echo "cannot find nm_test_func in $nlist" >&5 fi - rm -rf libconftest.dylib* - rm -f conftest.* + else + echo "cannot find nm_test_var in $nlist" >&5 fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test yes = "$pipe_works"; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 -$as_echo "$lt_cv_apple_cc_single_mod" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 -$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } -if ${lt_cv_ld_exported_symbols_list+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_exported_symbols_list=no - save_LDFLAGS=$LDFLAGS - echo "_main" > conftest.sym - LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_ld_exported_symbols_list=yes +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +printf "%s\n" "failed" >&6; } else - lt_cv_ld_exported_symbols_list=no + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +printf "%s\n" "ok" >&6; } fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 -$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 -$as_echo_n "checking for -force_load linker flag... " >&6; } -if ${lt_cv_ld_force_load+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_force_load=no - cat > conftest.c << _LT_EOF -int forced_loaded() { return 2;} -_LT_EOF - echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 - echo "$RANLIB libconftest.a" >&5 - $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF -int main() { return 0;} -_LT_EOF - echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err - _lt_result=$? 
- if test -s conftest.err && $GREP force_load conftest.err; then - cat conftest.err >&5 - elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then - lt_cv_ld_force_load=yes - else - cat conftest.err >&5 - fi - rm -f conftest.err libconftest.a conftest conftest.c - rm -rf conftest.dSYM -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 -$as_echo "$lt_cv_ld_force_load" >&6; } - case $host_os in - rhapsody* | darwin1.[012]) - _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; - darwin1.*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - darwin*) # darwin 5.x on - # if running on 10.5 or later, the deployment target defaults - # to the OS version, if on x86, and 10.4, the deployment - # target defaults to 10.4. Don't you love it? - case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[91]*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - 10.[012][,.]*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - 10.*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - esac - ;; - esac - if test yes = "$lt_cv_apple_cc_single_mod"; then - _lt_dar_single_mod='$single_module' - fi - if test yes = "$lt_cv_ld_exported_symbols_list"; then - _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' - else - _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' - fi - if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then - _lt_dsymutil='~$DSYMUTIL $lib || :' - else - _lt_dsymutil= - fi - ;; - esac -# func_munge_path_list VARIABLE PATH -# ----------------------------------- -# VARIABLE is name of variable containing _space_ separated list of -# directories to be munged by the contents of PATH, which is string -# having a format: -# "DIR[:DIR]:" -# string "DIR[ DIR]" will be prepended to VARIABLE -# ":DIR[:DIR]" -# string 
"DIR[ DIR]" will be appended to VARIABLE -# "DIRP[:DIRP]::[DIRA:]DIRA" -# string "DIRP[ DIRP]" will be prepended to VARIABLE and string -# "DIRA[ DIRA]" will be appended to VARIABLE -# "DIR[:DIR]" -# VARIABLE will be replaced by "DIR[ DIR]" -func_munge_path_list () -{ - case x$2 in - x) - ;; - *:) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" - ;; - x:*) - eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" - ;; - *::*) - eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" - eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" - ;; - *) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" - ;; - esac -} -for ac_header in dlfcn.h -do : - ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default -" -if test "x$ac_cv_header_dlfcn_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_DLFCN_H 1 -_ACEOF -fi -done -# Set options - enable_dlopen=no - # Check whether --enable-shared was given. -if test "${enable_shared+set}" = set; then : - enableval=$enable_shared; p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - enable_shared=yes -fi @@ -9352,28 +9687,6 @@ fi - # Check whether --enable-static was given. -if test "${enable_static+set}" = set; then : - enableval=$enable_static; p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. 
- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - enable_static=yes -fi @@ -9384,129 +9697,111 @@ fi -# Check whether --with-pic was given. -if test "${with_pic+set}" = set; then : - withval=$with_pic; lt_p=${PACKAGE-default} - case $withval in - yes|no) pic_mode=$withval ;; - *) - pic_mode=default - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for lt_pkg in $withval; do - IFS=$lt_save_ifs - if test "X$lt_pkg" = "X$lt_p"; then - pic_mode=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - pic_mode=default + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +printf %s "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test ${with_sysroot+y} +then : + withval=$with_sysroot; +else $as_nop + with_sysroot=no fi +lt_sysroot= +case $with_sysroot in #( + yes) + if test yes = "$GCC"; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 +printf "%s\n" "$with_sysroot" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +printf "%s\n" "${lt_sysroot:-no}" >&6; } - # Check whether --enable-fast-install was given. -if test "${enable_fast_install+set}" = set; then : - enableval=$enable_fast_install; p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. 
- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS=$lt_save_ifs - ;; - esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 +printf %s "checking for a working dd... " >&6; } +if test ${ac_cv_path_lt_DD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +: ${lt_DD:=$DD} +if test -z "$lt_DD"; then + ac_path_lt_DD_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_prog in dd + do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_lt_DD" || continue +if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +fi + $ac_path_lt_DD_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_lt_DD"; then + : + fi else - enable_fast_install=yes + ac_cv_path_lt_DD=$lt_DD fi +rm -f conftest.i conftest2.i conftest.out +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 +printf "%s\n" "$ac_cv_path_lt_DD" >&6; } - - - - - - shared_archive_member_spec= -case $host,$enable_shared in -power*-*-aix[5-9]*,yes) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 -$as_echo_n "checking which variant of shared library versioning to provide... " >&6; } - -# Check whether --with-aix-soname was given. -if test "${with_aix_soname+set}" = set; then : - withval=$with_aix_soname; case $withval in - aix|svr4|both) - ;; - *) - as_fn_error $? 
"Unknown argument to --with-aix-soname" "$LINENO" 5 - ;; - esac - lt_cv_with_aix_soname=$with_aix_soname -else - if ${lt_cv_with_aix_soname+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_with_aix_soname=aix +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 +printf %s "checking how to truncate binary pipes... " >&6; } +if test ${lt_cv_truncate_bin+y} +then : + printf %s "(cached) " >&6 +else $as_nop + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +lt_cv_truncate_bin= +if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi - - with_aix_soname=$lt_cv_with_aix_soname +rm -f conftest.i conftest2.i conftest.out +test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 -$as_echo "$with_aix_soname" >&6; } - if test aix != "$with_aix_soname"; then - # For the AIX way of multilib, we name the shared archive member - # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', - # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. - # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, - # the AIX toolchain works better with OBJECT_MODE set (default 32). - if test 64 = "${OBJECT_MODE-32}"; then - shared_archive_member_spec=shr_64 - else - shared_archive_member_spec=shr - fi - fi - ;; -*) - with_aix_soname=aix - ;; -esac - - - - - - - - - - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS=$ltmain - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' - - - - +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 +printf "%s\n" "$lt_cv_truncate_bin" >&6; } @@ -9514,783 +9809,1216 @@ LIBTOOL='$(SHELL) $(top_builddir)/libtool' +# Calculate cc_basename. 
Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} +# Check whether --enable-libtool-lock was given. +if test ${enable_libtool_lock+y} +then : + enableval=$enable_libtool_lock; +fi +test no = "$enable_libtool_lock" || enable_libtool_lock=yes +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out what ABI is being produced by ac_compile, and set mode + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE=32 + ;; + *ELF-64*) + HPUX_IA64_MODE=64 + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + if test yes = "$lt_cv_prog_gnu_ld"; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; +mips64*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + emul=elf + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + emul="${emul}32" + ;; + *64-bit*) + emul="${emul}64" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *MSB*) + emul="${emul}btsmip" + ;; + *LSB*) + emul="${emul}ltsmip" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *N32*) + emul="${emul}n32" + ;; + esac + LD="${LD-ld} -m $emul" + fi + rm -rf conftest* + ;; +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. Note that the listed cases only cover the + # situations where additional linker options are needed (such as when + # doing 32-bit compilation for a host where ld defaults to 64-bit, or + # vice versa); the common cases where no linker options are needed do + # not appear in the list. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS -belf" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +printf %s "checking whether the C compiler needs -belf... " >&6; } +if test ${lt_cv_cc_needs_belf+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int +main (void) +{ + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_cc_needs_belf=yes +else $as_nop + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +printf "%s\n" "$lt_cv_cc_needs_belf" >&6; } + if test yes != "$lt_cv_cc_needs_belf"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS=$SAVE_CFLAGS + fi + ;; +*-*solaris*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*|x86_64-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD=${LD-ld}_sol2 + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac +need_locks=$enable_libtool_lock +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_MANIFEST_TOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +printf "%s\n" "$MANIFEST_TOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if test ${lt_cv_path_mainfest_tool+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; } +if test yes != "$lt_cv_path_mainfest_tool"; then + MANIFEST_TOOL=: +fi -test -z "$LN_S" && LN_S="ln -s" + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_DSYMUTIL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +printf "%s\n" "$DSYMUTIL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_ac_ct_DSYMUTIL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +printf "%s\n" "$ac_ct_DSYMUTIL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_NMEDIT+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +printf "%s\n" "$NMEDIT" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -if test -n "${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 -$as_echo_n "checking for objdir... " >&6; } -if ${lt_cv_objdir+:} false; then : - $as_echo_n "(cached) " >&6 -else - rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_NMEDIT+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else - # MS-DOS does not allow filenames that begin with a dot. 
- lt_cv_objdir=_libs +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + fi -rmdir .libs 2>/dev/null fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 -$as_echo "$lt_cv_objdir" >&6; } -objdir=$lt_cv_objdir +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +printf "%s\n" "$ac_ct_NMEDIT" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi - - - - -cat >>confdefs.h <<_ACEOF -#define LT_OBJDIR "$lt_cv_objdir/" -_ACEOF - - - - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test set != "${COLLECT_NAMES+set}"; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; esac - -# Global variables: -ofile=libtool -can_build_shared=yes - -# All known linkers require a '.a' archive for static linking (except MSVC, -# which needs '.lib'). 
-libext=a - -with_gnu_ld=$lt_cv_prog_gnu_ld - -old_CC=$CC -old_CFLAGS=$CFLAGS - -# Set sane defaults for various variables -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$LD" && LD=ld -test -z "$ac_objext" && ac_objext=o - -func_cc_basename $compiler -cc_basename=$func_cc_basename_result - - -# Only perform the check for file, if the check method requires it -test -z "$MAGIC_CMD" && MAGIC_CMD=file -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 -$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } -if ${lt_cv_path_MAGIC_CMD+:} false; then : - $as_echo_n "(cached) " >&6 + NMEDIT=$ac_ct_NMEDIT + fi else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD=$MAGIC_CMD - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/${ac_tool_prefix}file"; then - lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD=$lt_cv_path_MAGIC_CMD - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org + NMEDIT="$ac_cv_prog_NMEDIT" +fi -_LT_EOF - fi ;; - esac - fi - break - fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_LIPO+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done - IFS=$lt_save_ifs - MAGIC_CMD=$lt_save_MAGIC_CMD - ;; -esac -fi +IFS=$as_save_IFS -MAGIC_CMD=$lt_cv_path_MAGIC_CMD -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +printf "%s\n" "$LIPO" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - - - -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 -$as_echo_n "checking for file... 
" >&6; } -if ${lt_cv_path_MAGIC_CMD+:} false; then : - $as_echo_n "(cached) " >&6 +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_LIPO+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD=$MAGIC_CMD - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/file"; then - lt_cv_path_MAGIC_CMD=$ac_dir/"file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD=$lt_cv_path_MAGIC_CMD - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done done - IFS=$lt_save_ifs - MAGIC_CMD=$lt_save_MAGIC_CMD - ;; -esac -fi +IFS=$as_save_IFS -MAGIC_CMD=$lt_cv_path_MAGIC_CMD -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +printf "%s\n" "$ac_ct_LIPO" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - + if test "x$ac_ct_LIPO" = x; then + LIPO=":" else - MAGIC_CMD=: + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO fi +else + LIPO="$ac_cv_prog_LIPO" fi + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_OTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 fi - ;; -esac - -# Use C for the default configuration in the libtool script - -lt_save_CC=$CC -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -objext=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' - +done + done +IFS=$as_save_IFS +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +printf "%s\n" "$OTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... 
" >&6; } +if test ${ac_cv_prog_ac_ct_OTOOL+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +printf "%s\n" "$ac_ct_OTOOL" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_OTOOL64+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +printf "%s\n" "$OTOOL64" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -# Allow CC to be a program name with arguments. -compiler=$CC -# Save the default compiler, since it gets overwritten when the other -# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. -compiler_DEFAULT=$CC +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +printf %s "checking for $ac_word... " >&6; } +if test ${ac_cv_prog_ac_ct_OTOOL64+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$RM conftest* +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +printf "%s\n" "$ac_ct_OTOOL64" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$RM -r conftest* + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... 
-if test -n "$compiler"; then -lt_prog_compiler_no_builtin_flag= -if test yes = "$GCC"; then - case $cc_basename in - nvcc*) - lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; - *) - lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 -$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } -if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_rtti_exceptions=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_rtti_exceptions=yes - fi - fi - $RM conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 -$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } -if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then - lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" -else - : -fi -fi - lt_prog_compiler_wl= -lt_prog_compiler_pic= -lt_prog_compiler_static= - if test yes = "$GCC"; then - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_static='-static' - case $host_os in - aix*) - # All AIX code is PIC. - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - fi - lt_prog_compiler_pic='-fPIC' - ;; - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - lt_prog_compiler_pic='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the '-m68020' flag to GCC prevents building anything better, - # like '-m68040'. - lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic='-DDLL_EXPORT' - case $host_os in - os2*) - lt_prog_compiler_static='$wl-static' - ;; - esac - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - ;; - haiku*) - # PIC is the default for Haiku. - # The "-static" flag exists, but is broken. 
- lt_prog_compiler_static= - ;; - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - ;; - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - lt_prog_compiler_can_build_shared=no - enable_shared=no - ;; - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - lt_prog_compiler_pic='-fPIC -shared' - ;; - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic=-Kconform_pic + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +printf %s "checking for -single_module linker flag... " >&6; } +if test ${lt_cv_apple_cc_single_mod+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_apple_cc_single_mod=no + if test -z "$LT_MULTI_MODULE"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? 
+ # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test 0 = "$_lt_result"; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* fi - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; } - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +printf %s "checking for -exported_symbols_list linker flag... " >&6; } +if test ${lt_cv_ld_exported_symbols_list+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ - case $cc_basename in - nvcc*) # Cuda Compiler Driver 2.2 - lt_prog_compiler_wl='-Xlinker ' - if test -n "$lt_prog_compiler_pic"; then - lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" - fi - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. 
- case $host_os in - aix*) - lt_prog_compiler_wl='-Wl,' - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - else - lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' - fi - ;; +int +main (void) +{ - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - case $cc_basename in - nagfor*) - # NAG Fortran compiler - lt_prog_compiler_wl='-Wl,-Wl,,' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - esac - ;; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_ld_exported_symbols_list=yes +else $as_nop + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic='-DDLL_EXPORT' - case $host_os in - os2*) - lt_prog_compiler_static='$wl-static' - ;; - esac - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; } - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static='$wl-a ${wl}archive' - ;; + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +printf %s "checking for -force_load linker flag... 
" >&6; } +if test ${lt_cv_ld_force_load+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cr libconftest.a conftest.o" >&5 + $AR cr libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static='-non_shared' - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +printf "%s\n" "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? 
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[912]*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + 10.[012][,.]*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + 10.*|11.*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test yes = "$lt_cv_apple_cc_single_mod"; then + _lt_dar_single_mod='$single_module' + fi + if test yes = "$lt_cv_ld_exported_symbols_list"; then + _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' + fi + if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac - linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - case $cc_basename in - # old Intel for x86_64, which still supported -KPIC. - ecc*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-static' +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) ;; - # icc used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - icc* | ifort*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; - # Lahey Fortran 8.1. 
- lf95*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; - nagfor*) - # NAG Fortran compiler - lt_prog_compiler_wl='-Wl,-Wl,,' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; - ccc*) - lt_prog_compiler_wl='-Wl,' - # All Alpha code is PIC. - lt_prog_compiler_static='-non_shared' + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; - xl* | bgxl* | bgf* | mpixl*) - # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-qpic' - lt_prog_compiler_static='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='' - ;; - *Sun\ F* | *Sun*Fortran*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Qoption ld ' - ;; - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Wl,' - ;; - *Intel*\ [CF]*Compiler*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - *Portland\ Group*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' - ;; - esac - ;; - esac - ;; + *) + eval $1=\"`$ECHO $2 | 
$SED 's/:/ /g'`\" + ;; + esac +} - newsos6) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; +ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes +then : + printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - lt_prog_compiler_pic='-fPIC -shared' - ;; +fi - osf3* | osf4* | osf5*) - lt_prog_compiler_wl='-Wl,' - # All OSF/1 code is PIC. - lt_prog_compiler_static='-non_shared' - ;; - rdos*) - lt_prog_compiler_static='-non_shared' - ;; - solaris*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in - f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; - esac - ;; - sunos4*) - lt_prog_compiler_wl='-Qoption ld ' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; +func_stripname_cnf () +{ + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; + esac +} # func_stripname_cnf - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic='-Kconform_pic' - lt_prog_compiler_static='-Bstatic' - fi - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - unicos*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_can_build_shared=no - ;; - uts4*) - lt_prog_compiler_pic='-pic' - lt_prog_compiler_static='-Bstatic' - ;; +# Set options + + + + enable_dlopen=no + + + # Check whether --enable-shared was given. 
+if test ${enable_shared+y} +then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; *) - lt_prog_compiler_can_build_shared=no + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS=$lt_save_ifs ;; esac - fi +else $as_nop + enable_shared=yes +fi -case $host_os in - # For platforms that do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic= - ;; - *) - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -if ${lt_cv_prog_compiler_pic+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -$as_echo "$lt_cv_prog_compiler_pic" >&6; } -lt_prog_compiler_pic=$lt_cv_prog_compiler_pic -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 -$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } -if ${lt_cv_prog_compiler_pic_works+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_pic_works=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. 
- # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_pic_works=yes - fi - fi - $RM conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 -$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } -if test yes = "$lt_cv_prog_compiler_pic_works"; then - case $lt_prog_compiler_pic in - "" | " "*) ;; - *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; - esac -else - lt_prog_compiler_pic= - lt_prog_compiler_can_build_shared=no -fi -fi + # Check whether --enable-static was given. +if test ${enable_static+y} +then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + enable_static=yes +fi @@ -10299,45 +11027,29 @@ fi -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } -if ${lt_cv_prog_compiler_static_works+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_static_works=no - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_static_works=yes - fi - else - lt_cv_prog_compiler_static_works=yes - fi - fi - $RM -r conftest* - LDFLAGS=$save_LDFLAGS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 -$as_echo "$lt_cv_prog_compiler_static_works" >&6; } -if test yes = "$lt_cv_prog_compiler_static_works"; then - : -else - lt_prog_compiler_static= +# Check whether --with-pic was given. +if test ${with_pic+y} +then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for lt_pkg in $withval; do + IFS=$lt_save_ifs + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + pic_mode=default fi @@ -10346,1624 +11058,895 @@ fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } -if ${lt_cv_prog_compiler_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 
2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* + # Check whether --enable-fast-install was given. +if test ${enable_fast_install+y} +then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else $as_nop + enable_fast_install=yes fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } -if ${lt_cv_prog_compiler_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? 
- cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* + shared_archive_member_spec= +case $host,$enable_shared in +power*-*-aix[5-9]*,yes) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 +printf %s "checking which variant of shared library versioning to provide... " >&6; } + +# Check whether --with-aix-soname was given. +if test ${with_aix_soname+y} +then : + withval=$with_aix_soname; case $withval in + aix|svr4|both) + ;; + *) + as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 + ;; + esac + lt_cv_with_aix_soname=$with_aix_soname +else $as_nop + if test ${lt_cv_with_aix_soname+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_with_aix_soname=aix fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + with_aix_soname=$lt_cv_with_aix_soname +fi + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 +printf "%s\n" "$with_aix_soname" >&6; } + if test aix != "$with_aix_soname"; then + # For the AIX way of multilib, we name the shared archive member + # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', + # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. 
+ # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, + # the AIX toolchain works better with OBJECT_MODE set (default 32). + if test 64 = "${OBJECT_MODE-32}"; then + shared_archive_member_spec=shr_64 + else + shared_archive_member_spec=shr + fi + fi + ;; +*) + with_aix_soname=aix + ;; +esac -hard_links=nottested -if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then - # do not overwrite the value of need_locks provided by the user - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 -$as_echo_n "checking if we can lock with hard links... " >&6; } - hard_links=yes - $RM conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 -$as_echo "$hard_links" >&6; } - if test no = "$hard_links"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 -$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no -fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } - runpath_var= - allow_undefined_flag= - always_export_symbols=no - archive_cmds= - archive_expsym_cmds= - compiler_needs_object=no - enable_shared_with_static_runtimes=no - export_dynamic_flag_spec= - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - hardcode_automatic=no - hardcode_direct=no - hardcode_direct_absolute=no - hardcode_libdir_flag_spec= - hardcode_libdir_separator= - hardcode_minus_L=no - hardcode_shlibpath_var=unsupported - inherit_rpath=no - link_all_deplibs=unknown - module_cmds= - module_expsym_cmds= - old_archive_from_new_cmds= - old_archive_from_expsyms_cmds= - thread_safe_flag_spec= - whole_archive_flag_spec= - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ' (' and ')$', so one must not match beginning or - # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', - # as well as any symbol that contains 'd'. - exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - # Exclude shared library initialization/finalization symbols. - extract_expsyms_cmds= - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. 
- if test yes != "$GCC"; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd* | bitrig*) - with_gnu_ld=no - ;; - linux* | k*bsd*-gnu | gnu*) - link_all_deplibs=no - ;; - esac +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS=$ltmain - ld_shlibs=yes +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' - # On some targets, GNU ld is compatible enough with the native linker - # that we're better off using the native interface for both. - lt_use_gnu_ld_interface=no - if test yes = "$with_gnu_ld"; then - case $host_os in - aix*) - # The AIX port of GNU ld has always aspired to compatibility - # with the native linker. However, as the warning in the GNU ld - # block says, versions before 2.19.5* couldn't really create working - # shared libraries, regardless of the interface used. - case `$LD -v 2>&1` in - *\ \(GNU\ Binutils\)\ 2.19.5*) ;; - *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; - *\ \(GNU\ Binutils\)\ [3-9]*) ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - fi - if test yes = "$lt_use_gnu_ld_interface"; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='$wl' - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - export_dynamic_flag_spec='$wl--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. 
- if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' - else - whole_archive_flag_spec= - fi - supports_anon_versioning=no - case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in - *GNU\ gold*) supports_anon_versioning=yes ;; - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - # See if GNU ld supports shared libraries. - case $host_os in - aix[3-9]*) - # On AIX/PPC, the GNU linker is very broken - if test ia64 != "$host_cpu"; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 -*** Warning: the GNU linker, at least up to release 2.19, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to install binutils -*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. -*** You will then need to restart the configuration process. 
-_LT_EOF - fi - ;; - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - else - ld_shlibs=no - fi - ;; - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, - # as there is no search path for DLLs. 
- hardcode_libdir_flag_spec='-L$libdir' - export_dynamic_flag_spec='$wl--export-all-symbols' - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' - exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file, use it as - # is; otherwise, prepend EXPORTS... - archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs=no - fi - ;; - haiku*) - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - link_all_deplibs=yes - ;; - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - shrext_cmds=.dll - archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> 
$output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - enable_shared_with_static_runtimes=yes - ;; - interix[3-9]*) - hardcode_direct=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - export_dynamic_flag_spec='$wl-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) - tmp_diet=no - if test linux-dietlibc = "$host_os"; then - case $cc_basename in - diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) - esac - fi - if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ - && test no = "$tmp_diet" - then - tmp_addflag=' $pic_flag' - tmp_sharedflag='-shared' - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group f77 and f90 compilers - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - lf95*) # Lahey Fortran 8.1 - whole_archive_flag_spec= - tmp_sharedflag='--shared' ;; - nagfor*) # NAGFOR 5.3 - tmp_sharedflag='-Wl,-shared' ;; - xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) - 
tmp_sharedflag='-qmkshrobj' - tmp_addflag= ;; - nvcc*) # Cuda Compiler Driver 2.2 - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - compiler_needs_object=yes - ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - compiler_needs_object=yes - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - esac - archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - if test yes = "$supports_anon_versioning"; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' - fi - case $cc_basename in - tcc*) - export_dynamic_flag_spec='-rdynamic' - ;; - xlf* | bgf* | bgxlf* | mpixlf*) - # IBM XL Fortran 10.1 on PPC cannot create shared libs itself - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test yes = "$supports_anon_versioning"; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script 
$output_objdir/$libname.ver -o $lib' - fi - ;; - esac - else - ld_shlibs=no - fi - ;; - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - solaris*) - if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. -_LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs=no - cat <<_LT_EOF 1>&2 -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. 
-_LT_EOF - ;; - *) - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - ;; - sunos4*) - archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - if test no = "$ld_shlibs"; then - runpath_var= - hardcode_libdir_flag_spec= - export_dynamic_flag_spec= - whole_archive_flag_spec= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag=unsupported - always_export_symbols=yes - archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L=yes - if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. 
- hardcode_direct=unsupported - fi - ;; - aix[4-9]*) - if test ia64 = "$host_cpu"; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag= - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to GNU nm, but means don't demangle to AIX nm. - # Without the "-l" option, or with the "-B" option, AIX nm treats - # weak defined symbols like other global defined symbols, whereas - # GNU nm marks them as "W". - # While the 'weak' keyword is ignored in the Export File, we need - # it in the Import File for the 'aix-soname' feature, so we have - # to replace the "-B" option with "-P" for AIX nm. - if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # have runtime linking enabled, and use it for executables. 
- # For shared libraries, we enable/disable runtime linking - # depending on the kind of the shared library created - - # when "with_aix_soname,aix_use_runtimelinking" is: - # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables - # "aix,yes" lib.so shared, rtl:yes, for executables - # lib.a static archive - # "both,no" lib.so.V(shr.o) shared, rtl:yes - # lib.a(lib.so.V) shared, rtl:no, for executables - # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a(lib.so.V) shared, rtl:no - # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a static archive - case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) - for ld_flag in $LDFLAGS; do - if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then - aix_use_runtimelinking=yes - break - fi - done - if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then - # With aix-soname=svr4, we create the lib.so.V shared archives only, - # so we don't have lib.a shared libs to link our executables. - # We have to force runtime linking in this case. - aix_use_runtimelinking=yes - LDFLAGS="$LDFLAGS -Wl,-brtl" - fi - ;; - esac - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. +test -z "$LN_S" && LN_S="ln -s" + - archive_cmds='' - hardcode_direct=yes - hardcode_direct_absolute=yes - hardcode_libdir_separator=':' - link_all_deplibs=yes - file_list_spec='$wl-f,' - case $with_aix_soname,$aix_use_runtimelinking in - aix,*) ;; # traditional, no import file - svr4,* | *,yes) # use import file - # The Import File defines what to hardcode. 
- hardcode_direct=no - hardcode_direct_absolute=no - ;; - esac - if test yes = "$GCC"; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`$CC -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L=yes - hardcode_libdir_flag_spec='-L$libdir' - hardcode_libdir_separator= - fi - ;; - esac - shared_flag='-shared' - if test yes = "$aix_use_runtimelinking"; then - shared_flag="$shared_flag "'$wl-G' - fi - # Need to ensure runtime linking is disabled for the traditional - # shared library, or the linker may eventually find shared libraries - # /with/ Import File - we do not want to mix them. - shared_flag_aix='-shared' - shared_flag_svr4='-shared $wl-G' - else - # not using gcc - if test ia64 = "$host_cpu"; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test yes = "$aix_use_runtimelinking"; then - shared_flag='$wl-G' - else - shared_flag='$wl-bM:SRE' - fi - shared_flag_aix='$wl-bM:SRE' - shared_flag_svr4='$wl-G' - fi - fi - export_dynamic_flag_spec='$wl-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols=yes - if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. 
- allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. - if test set = "${lt_cv_aix_libpath+set}"; then - aix_libpath=$lt_cv_aix_libpath -else - if ${lt_cv_aix_libpath_+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\([^ ]*\) *$/\1/ - p - } - }' - lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - # Check for a 64-bit object if we didn't find anything. - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=/usr/lib:/lib - fi -fi - aix_libpath=$lt_cv_aix_libpath_ -fi - hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag - else - if test ia64 = "$host_cpu"; then - hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' - allow_undefined_flag="-z nodefs" - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. 
- if test set = "${lt_cv_aix_libpath+set}"; then - aix_libpath=$lt_cv_aix_libpath -else - if ${lt_cv_aix_libpath_+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int -main () -{ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\([^ ]*\) *$/\1/ - p - } - }' - lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - # Check for a 64-bit object if we didn't find anything. - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=/usr/lib:/lib - fi +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST fi - aix_libpath=$lt_cv_aix_libpath_ +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +printf %s "checking for objdir... " >&6; } +if test ${lt_cv_objdir+y} +then : + printf %s "(cached) " >&6 +else $as_nop + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs fi +rmdir .libs 2>/dev/null +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +printf "%s\n" "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir - hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - no_undefined_flag=' $wl-bernotok' - allow_undefined_flag=' $wl-berok' - if test yes = "$with_gnu_ld"; then - # We only use this code for GNU lds that support --whole-archive. 
- whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' - else - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec='$convenience' - fi - archive_cmds_need_lc=yes - archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' - # -brtl affects multiple linker settings, -berok does not and is overridden later - compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' - if test svr4 != "$with_aix_soname"; then - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' - fi - if test aix != "$with_aix_soname"; then - archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' - else - # used by -dlpreopen to get the symbols - archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' - fi - archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' - fi - fi - ;; - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - bsdi[45]*) - export_dynamic_flag_spec=-rdynamic - ;; - cygwin* | mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - case $cc_basename in - cl*) - # Native MSVC - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - always_export_symbols=yes - file_list_spec='@' - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. 
- archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' - archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then - cp "$export_symbols" "$output_objdir/$soname.def"; - echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; - else - $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; - fi~ - $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ - linknames=' - # The linker will not automatically build a static lib if we build a DLL. - # _LT_TAGVAR(old_archive_from_new_cmds, )='true' - enable_shared_with_static_runtimes=yes - exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' - # Don't use ranlib - old_postinstall_cmds='chmod 644 $oldlib' - postlink_cmds='lt_outputfile="@OUTPUT@"~ - lt_tool_outputfile="@TOOL_OUTPUT@"~ - case $lt_outputfile in - *.exe|*.EXE) ;; - *) - lt_outputfile=$lt_outputfile.exe - lt_tool_outputfile=$lt_tool_outputfile.exe - ;; - esac~ - if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then - $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; - $RM "$lt_outputfile.manifest"; - fi' - ;; - *) - # Assume MSVC wrapper - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. 
- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_from_new_cmds='true' - # FIXME: Should let the user specify the lib program. - old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' - enable_shared_with_static_runtimes=yes - ;; - esac - ;; - darwin* | rhapsody*) +printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h - archive_cmds_need_lc=no - hardcode_direct=no - hardcode_automatic=yes - hardcode_shlibpath_var=unsupported - if test yes = "$lt_cv_ld_force_load"; then - whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' - else - whole_archive_flag_spec='' - fi - link_all_deplibs=yes - allow_undefined_flag=$_lt_dar_allow_undefined - case $cc_basename in - ifort*|nagfor*) _lt_dar_can_shared=yes ;; - *) _lt_dar_can_shared=$GCC ;; - esac - if test yes = "$_lt_dar_can_shared"; then - output_verbose_link_cmd=func_echo_all - archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" - module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" - archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" - module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" - else - ld_shlibs=no +case $host_os in +aix3*) + # AIX sometimes has 
problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES fi + ;; +esac - ;; +# Global variables: +ofile=libtool +can_build_shared=yes - dgux*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; +# All known linkers require a '.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; +with_gnu_ld=$lt_cv_prog_gnu_ld - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2.*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; +old_CC=$CC +old_CFLAGS=$CFLAGS - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o - hpux9*) - if test yes = "$GCC"; then - archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes +func_cc_basename $compiler +cc_basename=$func_cc_basename_result - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - export_dynamic_flag_spec='$wl-E' - ;; - hpux10*) - if test yes,no = "$GCC,$with_gnu_ld"; then - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test no = "$with_gnu_ld"; then - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='$wl-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L=yes - fi - ;; +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +printf %s "checking for ${ac_tool_prefix}file... " >&6; } +if test ${lt_cv_path_MAGIC_CMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/${ac_tool_prefix}file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 - hpux11*) - if test yes,no = "$GCC,$with_gnu_ld"; then - case $host_cpu in - hppa*64*) - archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - - # Older versions of the 11.00 compiler do not understand 
-b yet - # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 -$as_echo_n "checking if $CC understands -b... " >&6; } -if ${lt_cv_prog_compiler__b+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler__b=no - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS -b" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler__b=yes - fi - else - lt_cv_prog_compiler__b=yes - fi - fi - $RM -r conftest* - LDFLAGS=$save_LDFLAGS +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 -$as_echo "$lt_cv_prog_compiler__b" >&6; } -if test yes = "$lt_cv_prog_compiler__b"; then - archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +printf "%s\n" "$MAGIC_CMD" >&6; } else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi - ;; - esac - fi - if test no = "$with_gnu_ld"; then - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - case $host_cpu in - hppa*64*|ia64*) - hardcode_direct=no - hardcode_shlibpath_var=no - ;; - *) - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='$wl-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - ;; + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +printf %s "checking for file... " >&6; } +if test ${lt_cv_path_MAGIC_CMD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. 
+ if test -f "$ac_dir/file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; esac fi - ;; + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi - irix5* | irix6* | nonstopux*) - if test yes = "$GCC"; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. - # This should be the same for all languages, so no per-tag cache variable. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -if ${lt_cv_irix_exported_symbol+:} false; then : - $as_echo_n "(cached) " >&6 -else - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int foo (void) { return 0; } -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_irix_exported_symbol=yes +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +printf "%s\n" "$MAGIC_CMD" >&6; } else - lt_cv_irix_exported_symbol=no + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS + + + else + MAGIC_CMD=: + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -$as_echo "$lt_cv_irix_exported_symbol" >&6; } - if test yes = "$lt_cv_irix_exported_symbol"; then - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' - fi - link_all_deplibs=no - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - inherit_rpath=yes - link_all_deplibs=yes - ;; - linux*) - case $cc_basename in - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - 
ld_shlibs=yes - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; + fi + ;; +esac - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; +# Use C for the default configuration in the libtool script - newsos6) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - hardcode_shlibpath_var=no - ;; +lt_save_CC=$CC +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - *nto* | *qnx*) - ;; - openbsd* | bitrig*) - if test -f /usr/libexec/ld.so; then - hardcode_direct=yes - hardcode_shlibpath_var=no - hardcode_direct_absolute=yes - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - export_dynamic_flag_spec='$wl-E' - else - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - fi - else - ld_shlibs=no - fi - ;; +# Source file extension for C test sources. 
+ac_ext=c - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - shrext_cmds=.dll - archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - enable_shared_with_static_runtimes=yes - ;; +# Object file extension for compiled C test sources. 
+objext=o +objext=$objext - osf3*) - if test yes = "$GCC"; then - allow_undefined_flag=' $wl-expect_unresolved $wl\*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - ;; +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test yes = "$GCC"; then - allow_undefined_flag=' $wl-expect_unresolved $wl\*' - archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' +# Code to be used in simple link 
tests +lt_simple_link_test_code='int main(){return(0);}' - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec='-rpath $libdir' - fi - archive_cmds_need_lc='no' - hardcode_libdir_separator=: - ;; - - solaris*) - no_undefined_flag=' -z defs' - if test yes = "$GCC"; then - wlarc='$wl' - archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) - wlarc='' - archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' - ;; - *) - wlarc='$wl' - archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - ;; - esac - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_shlibpath_var=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands '-z linker_flag'. GCC discards it without '$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) 
- if test yes = "$GCC"; then - whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' - else - whole_archive_flag_spec='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs=yes - ;; - - sunos4*) - if test sequent = "$host_vendor"; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec='-L$libdir' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - sysv4) - case $host_vendor in - sni) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. - archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds='$CC -r -o $output$reload_objs' - hardcode_direct=no - ;; - motorola) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var=no - ;; - sysv4.3*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - export_dynamic_flag_spec='-Bexport' - ;; - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs=yes - fi - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag='$wl-z,text' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - runpath_var='LD_RUN_PATH' - if test yes = "$GCC"; then - archive_cmds='$CC -shared $wl-h,$soname -o $lib 
$libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - sysv5* | sco3.2v5* | sco5v6*) - # Note: We CANNOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - no_undefined_flag='$wl-z,text' - allow_undefined_flag='$wl-z,nodefs' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='$wl-R,$libdir' - hardcode_libdir_separator=':' - link_all_deplibs=yes - export_dynamic_flag_spec='$wl-Bexport' - runpath_var='LD_RUN_PATH' +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} - if test yes = "$GCC"; then - archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - uts4*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; +# Allow CC to be a program name with arguments. 
+compiler=$CC - *) - ld_shlibs=no - ;; - esac +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC - if test sni = "$host_vendor"; then - case $host in - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - export_dynamic_flag_spec='$wl-Blargedynsym' - ;; - esac - fi - fi +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 -$as_echo "$ld_shlibs" >&6; } -test no = "$ld_shlibs" && can_build_shared=no +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* -with_gnu_ld=$with_gnu_ld +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then +lt_prog_compiler_no_builtin_flag= +if test yes = "$GCC"; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } +if test ${lt_cv_prog_compiler_rtti_exceptions+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; } +if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi +fi + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= -# -# Do we need to explicitly link libc? 
-# -case "x$archive_cmds_need_lc" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc=yes + if test yes = "$GCC"; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' - if test yes,yes = "$GCC,$enable_shared"; then - case $archive_cmds in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + lt_prog_compiler_pic='-fPIC' ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 -$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } -if ${lt_cv_archive_cmds_need_lc+:} false; then : - $as_echo_n "(cached) " >&6 -else - $RM conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl - pic_flag=$lt_prog_compiler_pic - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag - allow_undefined_flag= - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 - (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } - then - lt_cv_archive_cmds_need_lc=no - else - lt_cv_archive_cmds_need_lc=yes - fi - allow_undefined_flag=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 -$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } - archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac ;; - esac - fi - ;; -esac - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. 
On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + case $cc_basename in + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='$wl-a ${wl}archive' + ;; + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64, which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # flang / f18. f95 an alias for gfortran or flang on Debian + flang* | f18* | f95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. 
+ lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ lt_prog_compiler_pic='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +printf %s "checking for $compiler option to produce PIC... " >&6; } +if test ${lt_cv_prog_compiler_pic+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } +if test ${lt_cv_prog_compiler_pic_works+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; } +if test yes = "$lt_cv_prog_compiler_pic_works"; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi +fi @@ -11975,8 +11958,47 @@ esac +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if test ${lt_cv_prog_compiler_static_works+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_static_works=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; } +if test yes = "$lt_cv_prog_compiler_static_works"; then + : +else + lt_prog_compiler_static= +fi @@ -11984,874 +12006,5657 @@ esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 -$as_echo_n "checking dynamic linker characteristics... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test ${lt_cv_prog_compiler_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test ${lt_cv_prog_compiler_c_o+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +printf %s "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +printf "%s\n" "$hard_links" >&6; } + if test no = "$hard_links"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ' (' and ')$', so one must not match beginning or + # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', + # as well as any symbol that contains 'd'. 
+ exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= -if test yes = "$GCC"; then - case $host_os in - darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; - *) lt_awk_arg='/^libraries:/' ;; - esac case $host_os in - mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; - *) lt_sed_strip_eq='s|=/|/|g' ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` - case $lt_search_path_spec in - *\;*) - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test yes != "$GCC"; then + with_gnu_ld=no + fi ;; - *) - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes ;; - esac - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary... 
- lt_tmp_lt_search_path_spec= - lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - # ...but if some path component already ends with the multilib dir we assume - # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). - case "$lt_multi_os_dir; $lt_search_path_spec " in - "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) - lt_multi_os_dir= + openbsd* | bitrig*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs=no ;; esac - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" - elif test -n "$lt_multi_os_dir"; then - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' -BEGIN {RS = " "; FS = "/|\n";} { - lt_foo = ""; - lt_count = 0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo = "/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[lt_foo]++; } - if (lt_freq[lt_foo] == 1) { print lt_foo; } -}'` - # AWK program above erroneously prepends '/' to C:/dos/paths - # for these hosts. 
- case $host_os in - mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ - $SED 's|/\([A-Za-z]:\)|\1|g'` ;; - esac - sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=.so -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown -hardcode_into_libs=no -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown + ld_shlibs=yes + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test yes = "$with_gnu_ld"; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test yes = "$lt_use_gnu_ld_interface"; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='$wl' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + export_dynamic_flag_spec='$wl--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test ia64 != "$host_cpu"; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. 
+ +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='$wl--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> 
$output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test linux-dietlibc = "$host_os"; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test no = "$tmp_diet" + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + nagfor*) # NAGFOR 5.3 + tmp_sharedflag='-Wl,-shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + 
tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + tcc*) + export_dynamic_flag_spec='-rdynamic' + ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script 
$output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test no = "$ld_shlibs"; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. 
+ hardcode_minus_L=yes + if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. 
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then + aix_use_runtimelinking=yes + break + fi + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # traditional, no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. 
+ hardcode_direct=no + hardcode_direct_absolute=no + ;; + esac + + if test yes = "$GCC"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag="$shared_flag "'$wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. 
+ allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath_+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. 
+ if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath_+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' $wl-bernotok' + allow_undefined_flag=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + 
bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. 
+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. 
Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test yes = "$GCC"; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='$wl-E' + ;; + + hpux10*) + if test yes,no = "$GCC,$with_gnu_ld"; then + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test yes,no = "$GCC,$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +printf %s "checking if $CC understands -b... 
" >&6; } +if test ${lt_cv_prog_compiler__b+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler__b=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +printf "%s\n" "$lt_cv_prog_compiler__b" >&6; } + +if test yes = "$lt_cv_prog_compiler__b"; then + archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test yes = "$GCC"; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if test ${lt_cv_irix_exported_symbol+y} +then : + printf %s "(cached) " >&6 +else $as_nop + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + lt_cv_irix_exported_symbol=yes +else $as_nop + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; } + if test yes = "$lt_cv_irix_exported_symbol"; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' + fi + link_all_deplibs=no + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + linux*) + case $cc_basename in + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + ld_shlibs=yes + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs 
$deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + else + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols 
| $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + osf3*) + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag 
$wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test yes = "$GCC"; then + wlarc='$wl' + archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='$wl' + archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. GCC discards it without '$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) 
+ if test yes = "$GCC"; then + whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test sequent = "$host_vendor"; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='$wl-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o 
$lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='$wl-z,text' + allow_undefined_flag='$wl-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test sni = "$host_vendor"; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='$wl-Blargedynsym' + ;; + esac + fi + fi + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +printf "%s\n" "$ld_shlibs" >&6; } +test no = "$ld_shlibs" 
&& can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +printf %s "checking whether -lc should be explicitly linked in... " >&6; } +if test ${lt_cv_archive_cmds_need_lc+y} +then : + printf %s "(cached) " >&6 +else $as_nop + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +printf %s "checking dynamic linker characteristics... " >&6; } + +if test yes = "$GCC"; then + case $host_os in + darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; + *) lt_awk_arg='/^libraries:/' ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; + *) lt_sed_strip_eq='s|=/|/|g' ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary... 
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + # ...but if some path component already ends with the multilib dir we assume + # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). + case "$lt_multi_os_dir; $lt_search_path_spec " in + "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) + lt_multi_os_dir= + ;; + esac + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" + elif test -n "$lt_multi_os_dir"; then + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS = " "; FS = "/|\n";} { + lt_foo = ""; + lt_count = 0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo = "/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's|/\([A-Za-z]:\)|\1|g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. + # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. 
+ aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... 
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if test ${lt_cv_shlibpath_overrides_runpath+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null +then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. 
+ # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix 
$libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; 
then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +printf "%s\n" "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +printf %s "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test yes = "$hardcode_automatic"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && + test no != "$hardcode_minus_L"; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action=unsupported +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +printf "%s\n" "$hardcode_action" >&6; } + +if test relink = "$hardcode_action" || + test yes = "$inherit_rpath"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test yes != "$enable_dlopen"; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen=load_add_on + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen=LoadLibrary + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +printf %s "checking for dlopen in -ldl... " >&6; } +if test ${ac_cv_lib_dl_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dl_dlopen=yes +else $as_nop + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else $as_nop + + lt_cv_dlopen=dyld + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + tpf*) + # Don't try to run any link tests for TPF. We know it's impossible + # because TPF is a cross-compiler, and we know how we open DSOs. + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + lt_cv_dlopen_self=no + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes +then : + lt_cv_dlopen=shl_load +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +printf %s "checking for shl_load in -ldld... " >&6; } +if test ${ac_cv_lib_dld_shl_load+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +char shl_load (); +int +main (void) +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dld_shl_load=yes +else $as_nop + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes +then : + lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld +else $as_nop + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes +then : + lt_cv_dlopen=dlopen +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +printf %s "checking for dlopen in -ldl... " >&6; } +if test ${ac_cv_lib_dl_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dl_dlopen=yes +else $as_nop + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +printf %s "checking for dlopen in -lsvld... 
" >&6; } +if test ${ac_cv_lib_svld_dlopen+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +char dlopen (); +int +main (void) +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_svld_dlopen=yes +else $as_nop + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes +then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld +else $as_nop + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +printf %s "checking for dld_link in -ldld... " >&6; } +if test ${ac_cv_lib_dld_dld_link+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +char dld_link (); +int +main (void) +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_lib_dld_dld_link=yes +else $as_nop + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes +then : + lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test no = "$lt_cv_dlopen"; then + enable_dlopen=no + else + enable_dlopen=yes + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS=$CPPFLAGS + test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS=$LDFLAGS + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS=$LIBS + LIBS="$lt_cv_dlopen_libs $LIBS" + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +printf %s "checking whether a program can dlopen itself... " >&6; } +if test ${lt_cv_dlopen_self+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +printf "%s\n" "$lt_cv_dlopen_self" >&6; } + + if test yes = "$lt_cv_dlopen_self"; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +printf %s "checking whether a statically linked program can dlopen itself... " >&6; } +if test ${lt_cv_dlopen_self_static+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. 
*/ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +printf "%s\n" "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS=$save_CPPFLAGS + LDFLAGS=$save_LDFLAGS + LIBS=$save_LIBS + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +printf %s "checking whether stripping libraries is possible... 
" >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP"; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } + else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + fi + ;; + *) + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report what library types will actually be built + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +printf %s "checking if libtool supports shared libraries... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +printf "%s\n" "$can_build_shared" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +printf %s "checking whether to build shared libraries... " >&6; } + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. 
+ case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +printf "%s\n" "$enable_shared" >&6; } + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +printf %s "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. + test yes = "$enable_shared" || enable_static=yes + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +printf "%s\n" "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC=$lt_save_CC + + if test -n "$CXX" && ( test no != "$CXX" && + ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || + (test g++ != "$CXX"))); then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +printf %s "checking how to run the C++ preprocessor... 
" >&6; } +if test -z "$CXXCPP"; then + if test ${ac_cv_prog_CXXCPP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + # Double quotes because $CXX needs to be expanded + for CXXCPP in "$CXX -E" cpp /lib/cpp + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO" +then : + +else $as_nop + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO" +then : + # Broken: success on invalid input. +continue +else $as_nop + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok +then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +printf "%s\n" "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO" +then : + +else $as_nop + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO" +then : + # Broken: success on invalid input. +continue +else $as_nop + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok +then : + +else $as_nop + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown 
+old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test yes != "$_lt_caught_CXX_error"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
+ lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + compiler_CXX=$CC + func_cc_basename $compiler +cc_basename=$func_cc_basename_result + + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test yes = "$GXX"; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' + else + lt_prog_compiler_no_builtin_flag_CXX= + fi + + if test yes = "$GXX"; then + # Set up default GNU C++ configuration + + + +# Check whether --with-gnu-ld was given. +if test ${with_gnu_ld+y} +then : + withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes +else $as_nop + with_gnu_ld=no +fi + +ac_prog=ld +if test yes = "$GCC"; then + # Check if gcc -print-prog-name=ld gives a path. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +printf %s "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return, which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD=$ac_prog + ;; + "") + # If it fails, then pretend we aren't using GCC. 
+ ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test yes = "$with_gnu_ld"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +printf %s "checking for GNU ld... " >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +printf %s "checking for non-GNU ld... " >&6; } +fi +if test ${lt_cv_path_LD+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test -z "$LD"; then + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD=$ac_dir/$ac_prog + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +printf "%s\n" "$LD" >&6; } +else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +if test ${lt_cv_prog_gnu_ld+y} +then : + printf %s "(cached) " >&6 +else $as_nop + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. 
+ if test yes = "$with_gnu_ld"; then + archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='$wl' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + ld_shlibs_CXX=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix[4-9]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. + # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. 
+ # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct_CXX=no + hardcode_direct_absolute_CXX=no + ;; + esac + + if test yes = "$GXX"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag=$shared_flag' $wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. 
+ shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec_CXX='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + # The "-G" linker flag allows undefined symbols. + no_undefined_flag_CXX='-bernotok' + # Determine the default libpath from the value encoded in an empty + # executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath__CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if test ${lt_cv_aix_libpath__CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main (void) +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO" +then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' $wl-bernotok' + allow_undefined_flag_CXX=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared + # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
+ archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. 
+ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='$wl--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
+ archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC 
\$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + if test yes != "$lt_cv_apple_cc_single_mod"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + os2*) + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_minus_L_CXX=yes + allow_undefined_flag_CXX=unsupported + shrext_cmds=.dll + archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC 
-Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes_CXX=yes + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='$wl-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='$wl-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + hardcode_direct_CXX=no + 
hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir 
$predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. 
+ output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='$wl-E' + whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* 
| osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input 
$wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes,no = "$GXX,$with_gnu_ld"; then + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; -case $host_os in -aix3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname.a' - shlibpath_var=LIBPATH + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='$libname$release$shared_ext$major' - ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; -aix[4-9]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test ia64 = "$host_cpu"; then - # AIX 5 supports IA64 - library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line '#! .'. This would cause the generated library to - # depend on '.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # Using Import Files as archive members, it is possible to support - # filename-based versioning of shared library archives on AIX. 
While - # this would work for both with and without runtime linking, it will - # prevent static linking of such archives. So we do filename-based - # shared library versioning with .so extension only, which is used - # when both runtime linking and shared linking is enabled. - # Unfortunately, runtime linking may impact performance, so we do - # not want this to be the default eventually. Also, we use the - # versioned .so libs for executables only if there is the -brtl - # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. - # To allow for filename-based versioning support, we need to create - # libNAME.so.V as an archive file, containing: - # *) an Import File, referring to the versioned filename of the - # archive as well as the shared archive member, telling the - # bitwidth (32 or 64) of that shared object, and providing the - # list of exported symbols of that shared object, eventually - # decorated with the 'weak' keyword - # *) the shared object with the F_LOADONLY flag set, to really avoid - # it being seen by the linker. 
- # At run time we better use the real file rather than another symlink, - # but for link time we create the symlink libNAME.so -> libNAME.so.V + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes - case $with_aix_soname,$aix_use_runtimelinking in - # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - aix,yes) # traditional libtool - dynamic_linker='AIX unversionable lib.so' - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - aix,no) # traditional AIX only - dynamic_linker='AIX lib.a(lib.so.V)' - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. 
- library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - ;; - svr4,*) # full svr4 only - dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # We do not specify a path in Import Files, so LIBPATH fires. - shlibpath_overrides_runpath=yes - ;; - *,yes) # both, prefer svr4 - dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # unpreferred sharedlib libNAME.a needs extra handling - postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' - postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' - # We do not specify a path in Import Files, so LIBPATH fires. 
- shlibpath_overrides_runpath=yes - ;; - *,no) # both, prefer aix - dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" - library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling - postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' - postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' - ;; - esac - shlibpath_var=LIBPATH - fi - ;; + output_verbose_link_cmd='func_echo_all' -amigaos*) - case $host_cpu in - powerpc) - # Since July 2007 AmigaOS4 officially supports .so libraries. - # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - m68k) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - esac - ;; + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' -beos*) - library_names_spec='$libname$shared_ext' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test yes,no = "$GXX,$with_gnu_ld"; then + no_undefined_flag_CXX=' $wl-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + else + # g++ 2.7 appears to require '-G' NOT '-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + fi -bsdi[45]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; + hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; -cygwin* | mingw* | pw32* | cegcc*) - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='$wl-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' - case $GCC,$cc_basename in 
- yes,*) - # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_CXX='$wl-z,text' + allow_undefined_flag_CXX='$wl-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='$wl-Bexport' + runpath_var='LD_RUN_PATH' - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" - ;; - mingw* | cegcc*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac ;; - esac - dynamic_linker='Win32 ld.exe' - ;; - *,cl*) - # Native MSVC - libname_spec='$name' - soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - library_names_spec='$libname.dll.lib' + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; - case $build_os in - mingw*) - sys_lib_search_path_spec= - lt_save_ifs=$IFS - IFS=';' - for lt_path in $LIB - do - IFS=$lt_save_ifs - # Let DOS variable expansion print the short 8.3 style file name. 
- lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` - sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" - done - IFS=$lt_save_ifs - # Convert to MSYS style. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` - ;; - cygwin*) - # Convert to unix form, then to dos form, then back to unix form - # but this time dos style (no spaces!) so that the unix form looks - # like /cygdrive/c/PROGRA~1:/cygdr... - sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` - sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` - sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - ;; - *) - sys_lib_search_path_spec=$LIB - if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then - # It is most probably a Windows format PATH. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # FIXME: find the short name or the path components, as spaces are - # common. (e.g. "Program Files" -> "PROGRA~1") - ;; + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; esac - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - dynamic_linker='Win32 link.exe' - ;; + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +printf "%s\n" "$ld_shlibs_CXX" >&6; } + test no = "$ld_shlibs_CXX" && can_build_shared=no + + GCC_CXX=$GXX + LD_CXX=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + - *) - # Assume MSVC wrapper - library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' - dynamic_linker='Win32 ld.exe' - ;; - esac - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' - soname_spec='$libname$release$major$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $prev$p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test x-L = "$p" || + test x-R = "$p"; then + prev=$p + continue + fi - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test no = "$pre_test_object_deps_done"; then + case $prev in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX=$prev$p + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. 
+ esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX=$prev$p + else + postdeps_CXX="${postdeps_CXX} $prev$p" + fi + fi + prev= + ;; -dgux*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test no = "$pre_test_object_deps_done"; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX=$p + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX=$p + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. 
- if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[23].*) objformat=aout ;; - *) objformat=elf ;; esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2.*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; + done -haiku*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - dynamic_linker="$host_os runtime_loader" - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=no - sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. 
- version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - if test 32 = "$HPUX_IA64_MODE"; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - sys_lib_dlsearch_path_spec=/usr/lib/hpux32 - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - sys_lib_dlsearch_path_spec=/usr/lib/hpux64 - fi - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
- postinstall_cmds='chmod 555 $lib' - # or fails outright, so override atomically: - install_override_mode=555 - ;; +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS +# PORTME: override above test on systems where it is broken +case $host_os in interix[3-9]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test yes = "$lt_cv_prog_gnu_ld"; then - version_type=linux # correct to gnu/linux during the next big refactor - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" - sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" - hardcode_into_libs=yes - ;; -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; -linux*android*) - version_type=none # Android doesn't support versioned libraries. - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext' - soname_spec='$libname$release$shared_ext' - finish_cmds= - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - dynamic_linker='Android linker' - # Don't embed -rpath directories since the linker doesn't support them. 
- hardcode_libdir_flag_spec='-L$libdir' - ;; -# This must be glibc/ELF. -linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # Some binutils ld are patched to set DT_RUNPATH - if ${lt_cv_shlibpath_overrides_runpath+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_shlibpath_overrides_runpath=no - save_LDFLAGS=$LDFLAGS - save_libdir=$libdir - eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ - LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int -main () -{ + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : - lt_cv_shlibpath_overrides_runpath=yes -fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS - libdir=$save_libdir -fi + # C++ specific cases for pic, static, wl, etc. + if test yes = "$GXX"; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' - shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + lt_prog_compiler_pic_CXX='-fPIC' + ;; - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. 
- hardcode_into_libs=yes + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; - # Ideally, we could use ldconfig to report *all* directores which are - # searched for libraries, however this is still not possible. Aside from not - # being certain /sbin/ldconfig is available, command - # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, - # even though it is searched at run-time. Try to do the best guess by - # appending ld.so.conf contents (and includes) to the search path. - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static_CXX='$wl-static' + ;; + esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. 
+ if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + if test ia64 != "$host_cpu"; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64, which still supported -KPIC. 
+ lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac fi - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. - dynamic_linker='GNU/Linux ld.so' - ;; +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +printf %s "checking for $compiler option to produce PIC... 
" >&6; } +if test ${lt_cv_prog_compiler_pic_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if test ${lt_cv_prog_compiler_pic_works_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. 
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; } -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; +if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi -newsos6) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; +fi -*nto* | *qnx*) - 
version_type=qnx - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='ldqnx.so' - ;; -openbsd* | bitrig*) - version_type=sunos - sys_lib_dlsearch_path_spec=/usr/lib - need_lib_prefix=no - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - need_version=no - else - need_version=yes - fi - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; -os2*) - libname_spec='$name' - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no - # OS/2 can only load a DLL with a base name of 8 characters or less. - soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; - v=$($ECHO $release$versuffix | tr -d .-); - n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); - $ECHO $n$v`$shared_ext' - library_names_spec='${libname}_dll.$libext' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=BEGINLIBPATH - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - ;; -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; -rdos*) - dynamic_linker=no - ;; +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if test ${lt_cv_prog_compiler_static_works_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS -solaris*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; } -sunos4*) - version_type=sunos - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test yes = "$with_gnu_ld"; then - need_lib_prefix=no - fi - need_version=yes - ;; +if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then + : +else + lt_prog_compiler_static_CXX= +fi -sysv4 | sysv4.3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; 
- esac - ;; -sysv4*MP*) - if test -d /usr/nec; then - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' - soname_spec='$libname$shared_ext.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=sco - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - if test yes = "$with_gnu_ld"; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; -tpf*) - # TPF is a cross-target only. Preferred cross-host = GNU/Linux. - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if test ${lt_cv_prog_compiler_c_o_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* -uts4*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } -*) - dynamic_linker=no - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 -$as_echo "$dynamic_linker" >&6; } -test no = "$dynamic_linker" && can_build_shared=no -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test yes = "$GCC"; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi -if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then - sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec -fi + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if test ${lt_cv_prog_compiler_c_o_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* -if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then - sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; } -# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... -configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec -# ... 
but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code -func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" -# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool -configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +printf %s "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +printf "%s\n" "$hard_links" >&6; } + if test no = "$hard_links"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". 
+ # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX=$ltdll_cmds + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs_CXX=no + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +printf "%s\n" "$ld_shlibs_CXX" >&6; } +test no = "$ld_shlibs_CXX" && can_build_shared=no +with_gnu_ld_CXX=$with_gnu_ld +# +# Do we need to explicitly link libc? 
+# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +printf %s "checking whether -lc should be explicitly linked in... " >&6; } +if test ${lt_cv_archive_cmds_need_lc_CXX+y} +then : + printf %s "(cached) " >&6 +else $as_nop + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac @@ -12914,582 +17719,802 @@ configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +printf %s "checking dynamic linker characteristics... " >&6; } +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. 
The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. + # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. 
Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. 
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 -$as_echo_n "checking how to hardcode library paths into programs... 
" >&6; } -hardcode_action= -if test -n "$hardcode_libdir_flag_spec" || - test -n "$runpath_var" || - test yes = "$hardcode_automatic"; then +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; - # We can hardcode non-existent directories. - if test no != "$hardcode_direct" && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && - test no != "$hardcode_minus_L"; then - # Linking always hardcodes the temporary library directory. - hardcode_action=relink +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action=immediate + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- hardcode_action=unsupported -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 -$as_echo "$hardcode_action" >&6; } - -if test relink = "$hardcode_action" || - test yes = "$inherit_rpath"; then - # Fast installation is not supported - enable_fast_install=no -elif test yes = "$shlibpath_overrides_runpath" || - test no = "$enable_shared"; then - # Fast installation is not necessary - enable_fast_install=needless -fi - - - - - - - if test yes != "$enable_dlopen"; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH case $host_os in - beos*) - lt_cv_dlopen=load_add_on - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + 
shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; - mingw* | pw32* | cegcc*) - lt_cv_dlopen=LoadLibrary - lt_cv_dlopen_libs= +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi ;; - - cygwin*) - lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; - - darwin*) - # if libdl is installed we need to link against it - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... 
" >&6; } -if ${ac_cv_lib_dl_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl -else - - lt_cv_dlopen=dyld - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - -fi - + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; - tpf*) - # Don't try to run any link tests for TPF. We know it's impossible - # because TPF is a cross-compiler, and we know how we open DSOs. 
- lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= - lt_cv_dlopen_self=no - ;; +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; *) - ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" -if test "x$ac_cv_func_shl_load" = xyes; then : - lt_cv_dlopen=shl_load -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 -$as_echo_n "checking for shl_load in -ldld... " >&6; } -if ${ac_cv_lib_dld_shl_load+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); -int -main () -{ -return shl_load (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_shl_load=yes -else - ac_cv_lib_dld_shl_load=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 -$as_echo "$ac_cv_lib_dld_shl_load" >&6; } -if test "x$ac_cv_lib_dld_shl_load" = xyes; then : - lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld -else - ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" -if test "x$ac_cv_func_dlopen" = xyes; then : - lt_cv_dlopen=dlopen -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... " >&6; } -if ${ac_cv_lib_dl_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 -$as_echo_n "checking for dlopen in -lsvld... " >&6; } -if ${ac_cv_lib_svld_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsvld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_svld_dlopen=yes -else - ac_cv_lib_svld_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 -$as_echo "$ac_cv_lib_svld_dlopen" >&6; } -if test "x$ac_cv_lib_svld_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 -$as_echo_n "checking for dld_link in -ldld... 
" >&6; } -if ${ac_cv_lib_dld_dld_link+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +linux*android*) + version_type=none # Android doesn't support versioned libraries. + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec_CXX='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if test ${lt_cv_shlibpath_overrides_runpath+y} +then : + printf %s "(cached) " >&6 +else $as_nop + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char dld_link (); int -main () +main (void) { -return dld_link (); + ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_dld_link=yes -else - ac_cv_lib_dld_dld_link=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 -$as_echo "$ac_cv_lib_dld_dld_link" >&6; } -if test "x$ac_cv_lib_dld_dld_link" = xyes; then : - lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld -fi - - +if ac_fn_cxx_try_link "$LINENO" +then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null +then : + lt_cv_shlibpath_overrides_runpath=yes fi - - fi - +rm -f core conftest.err conftest.$ac_objext conftest.beam \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir fi + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath -fi + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. 
+ if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi -fi + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; - ;; - esac +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; - if test no = "$lt_cv_dlopen"; then - enable_dlopen=no +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' else - enable_dlopen=yes + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS=$CPPFLAGS - test yes = "$ac_cv_header_dlfcn_h" 
&& CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; - save_LDFLAGS=$LDFLAGS - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; - save_LIBS=$LIBS - LIBS="$lt_cv_dlopen_libs $LIBS" +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 -$as_echo_n "checking whether a program can dlopen itself... " >&6; } -if ${lt_cv_dlopen_self+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test yes = "$cross_compiling"; then : - lt_cv_dlopen_self=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line $LINENO "configure" -#include "confdefs.h" +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. 
+ soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; -#if HAVE_DLFCN_H -#include -#endif +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; -#include +rdos*) + dynamic_linker=no + ;; -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + 
hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; -/* When -fvisibility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. 
*/ -#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -int fnord () __attribute__((visibility("default"))); -#endif +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; -int fnord () { return 42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else - puts (dlerror ()); - - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: 
\"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? - case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; esac - else : - # compilation failed - lt_cv_dlopen_self=no fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +printf "%s\n" "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi -rm -fr conftest* +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + 
sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 -$as_echo "$lt_cv_dlopen_self" >&6; } - if test yes = "$lt_cv_dlopen_self"; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 -$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } -if ${lt_cv_dlopen_self_static+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test yes = "$cross_compiling"; then : - lt_cv_dlopen_self_static=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line $LINENO "configure" -#include "confdefs.h" +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec -#if HAVE_DLFCN_H -#include -#endif +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" -#include +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. 
*/ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif -/* When -fvisibility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -int fnord () __attribute__((visibility("default"))); -#endif -int fnord () { return 42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self_static=no - fi -fi -rm -fr conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 -$as_echo "$lt_cv_dlopen_self_static" >&6; } - fi - CPPFLAGS=$save_CPPFLAGS - LDFLAGS=$save_LDFLAGS - LIBS=$save_LIBS - ;; - esac - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi @@ -13507,35 +18532,6 @@ fi -striplib= -old_striplib= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 -$as_echo_n "checking whether stripping libraries is possible... " >&6; } -if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP"; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - fi - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ;; - esac -fi @@ -13548,59 +18544,70 @@ fi - # Report what library types will actually be built - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 -$as_echo_n "checking if libtool supports shared libraries... 
" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 -$as_echo "$can_build_shared" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +printf %s "checking how to hardcode library paths into programs... " >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test yes = "$hardcode_automatic_CXX"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct_CXX" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && + test no != "$hardcode_minus_L_CXX"; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +printf "%s\n" "$hardcode_action_CXX" >&6; } + +if test relink = "$hardcode_action_CXX" || + test yes = "$inherit_rpath_CXX"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 -$as_echo_n "checking whether to build shared libraries... " >&6; } - test no = "$can_build_shared" && enable_shared=no - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. 
- case $host_os in - aix3*) - test yes = "$enable_shared" && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - aix[4-9]*) - if test ia64 != "$host_cpu"; then - case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in - yes,aix,yes) ;; # shared object as lib.so file only - yes,svr4,*) ;; # shared object as lib.so archive member only - yes,*) enable_static=no ;; # shared object in lib.a archive as well - esac - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 -$as_echo "$enable_shared" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 -$as_echo_n "checking whether to build static libraries... " >&6; } - # Make sure either enable_shared or enable_static is yes. - test yes = "$enable_shared" || enable_static=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 -$as_echo "$enable_static" >&6; } + fi # test -n "$compiler" + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test yes != "$_lt_caught_CXX_error" -fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -CC=$lt_save_CC - @@ -13623,11 +18630,12 @@ CC=$lt_save_CC # Only expand once: - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 -$as_echo_n "checking whether byte ordering is bigendian... 
" >&6; } -if ${ac_cv_c_bigendian+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +printf %s "checking whether byte ordering is bigendian... " >&6; } +if test ${ac_cv_c_bigendian+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -13638,7 +18646,8 @@ else typedef int dummy; _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. @@ -13662,7 +18671,7 @@ if ac_fn_c_try_compile "$LINENO"; then : fi done fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -13671,7 +18680,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext #include int -main () +main (void) { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ @@ -13683,7 +18692,8 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -13691,7 +18701,7 @@ if ac_fn_c_try_compile "$LINENO"; then : #include int -main () +main (void) { #if BYTE_ORDER != BIG_ENDIAN not big endian @@ -13701,14 +18711,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_c_bigendian=yes -else +else $as_nop ac_cv_c_bigendian=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). @@ -13717,7 +18728,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext #include int -main () +main (void) { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros @@ -13727,14 +18738,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int -main () +main (void) { #ifndef _BIG_ENDIAN not big endian @@ -13744,31 +18756,33 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_c_bigendian=yes -else +else $as_nop ac_cv_c_bigendian=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. - if test "$cross_compiling" = yes; then : + if test "$cross_compiling" = yes +then : # Try to guess by grepping values from an object file. 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -short int ascii_mm[] = +unsigned short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; - short int ascii_ii[] = + unsigned short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } - short int ebcdic_ii[] = + unsigned short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; - short int ebcdic_mm[] = + unsigned short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; @@ -13776,14 +18790,15 @@ short int ascii_mm[] = extern int foo; int -main () +main (void) { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi @@ -13796,13 +18811,13 @@ if ac_fn_c_try_compile "$LINENO"; then : fi fi fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -else +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int -main () +main (void) { /* Are we little or big endian? From Harbison&Steele. 
*/ @@ -13818,9 +18833,10 @@ main () return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : +if ac_fn_c_try_run "$LINENO" +then : ac_cv_c_bigendian=no -else +else $as_nop ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ @@ -13829,17 +18845,17 @@ fi fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 -$as_echo "$ac_cv_c_bigendian" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +printf "%s\n" "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) - $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h + printf "%s\n" "#define WORDS_BIGENDIAN 1" >>confdefs.h ;; #( no) ;; #( universal) -$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h +printf "%s\n" "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) @@ -13849,17 +18865,19 @@ $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h # Check whether --enable-largefile was given. -if test "${enable_largefile+set}" = set; then : +if test ${enable_largefile+y} +then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 -$as_echo_n "checking for special C compiler options needed for large files... " >&6; } -if ${ac_cv_sys_largefile_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 +printf %s "checking for special C compiler options needed for large files... " >&6; } +if test ${ac_cv_sys_largefile_CC+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC @@ -13873,44 +18891,47 @@ else We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. 
*/ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) +#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int -main () +main (void) { ; return 0; } _ACEOF - if ac_fn_c_try_compile "$LINENO"; then : + if ac_fn_c_try_compile "$LINENO" +then : break fi -rm -f core conftest.err conftest.$ac_objext +rm -f core conftest.err conftest.$ac_objext conftest.beam CC="$CC -n32" - if ac_fn_c_try_compile "$LINENO"; then : + if ac_fn_c_try_compile "$LINENO" +then : ac_cv_sys_largefile_CC=' -n32'; break fi -rm -f core conftest.err conftest.$ac_objext +rm -f core conftest.err conftest.$ac_objext conftest.beam break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 -$as_echo "$ac_cv_sys_largefile_CC" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 +printf "%s\n" "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 -$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } -if ${ac_cv_sys_file_offset_bits+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +printf %s "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } +if test ${ac_cv_sys_file_offset_bits+y} +then : + printf %s "(cached) " >&6 +else $as_nop while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -13919,22 +18940,23 @@ else We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. 
*/ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) +#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_sys_file_offset_bits=no; break fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 @@ -13943,43 +18965,43 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) +#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_sys_file_offset_bits=64; break fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 -$as_echo "$ac_cv_sys_file_offset_bits" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 +printf "%s\n" "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) -cat >>confdefs.h <<_ACEOF -#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits -_ACEOF +printf "%s\n" "#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits" >>confdefs.h ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 -$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } -if ${ac_cv_sys_large_files+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 +printf %s "checking for _LARGE_FILES value needed for large files... " >&6; } +if test ${ac_cv_sys_large_files+y} +then : + printf %s "(cached) " >&6 +else $as_nop while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -13988,22 +19010,23 @@ else We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) +#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_sys_large_files=no; break fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 @@ -14012,55 +19035,66 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) +#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_sys_large_files=1; break fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 -$as_echo "$ac_cv_sys_large_files" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 +printf "%s\n" "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) -cat >>confdefs.h <<_ACEOF -#define _LARGE_FILES $ac_cv_sys_large_files -_ACEOF +printf "%s\n" "#define _LARGE_FILES $ac_cv_sys_large_files" >>confdefs.h ;; esac rm -rf conftest* fi - - fi +# Crypto backends + found_crypto=none +found_crypto_str="" +support_clear_memory=no +crypto_errors="" -# Configure parameters -# Check whether --with-openssl was given. 
-if test "${with_openssl+set}" = set; then : - withval=$with_openssl; use_openssl=$withval -else - use_openssl=auto + + + + + +# Check whether --with-crypto was given. +if test ${with_crypto+y} +then : + withval=$with_crypto; use_crypto=$withval +else $as_nop + use_crypto=auto + fi +case "${use_crypto}" in + auto|openssl|libgcrypt|mbedtls|wincng) + if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else @@ -14078,9 +19112,10 @@ fi # Check whether --with-gnu-ld was given. -if test "${with_gnu_ld+set}" = set; then : +if test ${with_gnu_ld+y} +then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes -else +else $as_nop with_gnu_ld=no fi @@ -14100,8 +19135,8 @@ fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5 -$as_echo_n "checking for ld used by GCC... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by GCC" >&5 +printf %s "checking for ld used by GCC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw @@ -14130,15 +19165,16 @@ $as_echo_n "checking for ld used by GCC... " >&6; } ;; esac elif test "$with_gnu_ld" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 -$as_echo_n "checking for GNU ld... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +printf %s "checking for GNU ld... " >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 -$as_echo_n "checking for non-GNU ld... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +printf %s "checking for non-GNU ld... 
" >&6; } fi -if ${acl_cv_path_LD+:} false; then : - $as_echo_n "(cached) " >&6 -else +if test ${acl_cv_path_LD+y} +then : + printf %s "(cached) " >&6 +else $as_nop if test -z "$LD"; then IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR-:}" for ac_dir in $PATH; do @@ -14164,18 +19200,19 @@ fi LD="$acl_cv_path_LD" if test -n "$LD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 -$as_echo "$LD" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 +printf "%s\n" "$LD" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 -$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } -if ${acl_cv_prog_gnu_ld+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +printf %s "checking if the linker ($LD) is GNU ld... " >&6; } +if test ${acl_cv_prog_gnu_ld+y} +then : + printf %s "(cached) " >&6 +else $as_nop # I'd rather use --version here, but apparently some GNU ld's only accept -v. case `$LD -v 2>&1 &1 &5 -$as_echo "$acl_cv_prog_gnu_ld" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $acl_cv_prog_gnu_ld" >&5 +printf "%s\n" "$acl_cv_prog_gnu_ld" >&6; } with_gnu_ld=$acl_cv_prog_gnu_ld - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 -$as_echo_n "checking for shared library run path origin... " >&6; } -if ${acl_cv_rpath+:} false; then : - $as_echo_n "(cached) " >&6 -else + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 +printf %s "checking for shared library run path origin... 
" >&6; } +if test ${acl_cv_rpath+y} +then : + printf %s "(cached) " >&6 +else $as_nop CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh @@ -14204,8 +19243,8 @@ else acl_cv_rpath=done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 -$as_echo "$acl_cv_rpath" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 +printf "%s\n" "$acl_cv_rpath" >&6; } wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" @@ -14216,9 +19255,10 @@ $as_echo "$acl_cv_rpath" >&6; } acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" # Check whether --enable-rpath was given. -if test "${enable_rpath+set}" = set; then : +if test ${enable_rpath+y} +then : enableval=$enable_rpath; : -else +else $as_nop enable_rpath=yes fi @@ -14243,16 +19283,15 @@ fi fi -# Check whether --with-libgcrypt was given. -if test "${with_libgcrypt+set}" = set; then : - withval=$with_libgcrypt; use_libgcrypt=$withval +if test "$use_crypto" = "auto" && test "$found_crypto" = "none" || test "$use_crypto" = "openssl"; then - old_LDFLAGS=$LDFLAGS - old_CFLAGS=$CFLAGS - if test -n "$use_libgcrypt" && test "$use_libgcrypt" != "no"; then - LDFLAGS="$LDFLAGS -L$use_libgcrypt/lib" - CFLAGS="$CFLAGS -I$use_libgcrypt/include" + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" + + if test "${with_libssl_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_libssl_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_libssl_prefix}/lib" fi @@ -14264,6 +19303,7 @@ if test "${with_libgcrypt+set}" = set; then : + use_additional=yes acl_save_prefix="$prefix" @@ -14278,9 +19318,10 @@ if test "${with_libgcrypt+set}" = set; then : prefix="$acl_save_prefix" -# Check whether --with-libgcrypt-prefix was given. 
-if test "${with_libgcrypt_prefix+set}" = set; then : - withval=$with_libgcrypt_prefix; +# Check whether --with-libssl-prefix was given. +if test ${with_libssl_prefix+y} +then : + withval=$with_libssl_prefix; if test "X$withval" = "Xno"; then use_additional=no else @@ -14305,14 +19346,14 @@ if test "${with_libgcrypt_prefix+set}" = set; then : fi - LIBGCRYPT= - LTLIBGCRYPT= - INCGCRYPT= - LIBGCRYPT_PREFIX= + LIBSSL= + LTLIBSSL= + INCSSL= + LIBSSL_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= - names_next_round='gcrypt ' + names_next_round='ssl crypto' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= @@ -14331,9 +19372,9 @@ fi if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" - test -z "$value" || LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$value" + test -z "$value" || LIBSSL="${LIBSSL}${LIBSSL:+ }$value" eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }$value" + test -z "$value" || LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }$value" else : fi @@ -14390,7 +19431,7 @@ fi fi fi if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIBGCRYPT; do + for x in $LDFLAGS $LTLIBSSL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14449,10 +19490,10 @@ fi done fi if test "X$found_dir" != "X"; then - LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-L$found_dir -l$name" + LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" + LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" else haveit= for x in $ltrpathdirs; do @@ -14465,10 +19506,10 @@ fi ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" + LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test 
"$acl_hardcode_minus_L" = no; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" + LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then @@ -14481,7 +19522,7 @@ fi fi else haveit= - for x in $LDFLAGS $LIBGCRYPT; do + for x in $LDFLAGS $LIBSSL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14497,28 +19538,28 @@ fi fi done if test -z "$haveit"; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$found_dir" + LIBSSL="${LIBSSL}${LIBSSL:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" + LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" else - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-l$name" + LIBSSL="${LIBSSL}${LIBSSL:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_a" + LIBSSL="${LIBSSL}${LIBSSL:+ }$found_a" else - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$found_dir -l$name" + LIBSSL="${LIBSSL}${LIBSSL:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - LIBGCRYPT_PREFIX="$basedir" + LIBSSL_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac @@ -14533,7 +19574,7 @@ fi fi fi if test -z "$haveit"; then - for x in $CPPFLAGS $INCGCRYPT; do + for x in $CPPFLAGS $INCSSL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14550,7 +19591,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then - INCGCRYPT="${INCGCRYPT}${INCGCRYPT:+ }-I$additional_includedir" + INCSSL="${INCSSL}${INCSSL:+ }-I$additional_includedir" fi fi fi @@ -14578,7 +19619,7 @@ fi fi if test -z "$haveit"; then haveit= - for x in $LDFLAGS $LIBGCRYPT; do + for x in $LDFLAGS $LIBSSL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14595,11 +19636,11 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - 
LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$additional_libdir" + LIBSSL="${LIBSSL}${LIBSSL:+ }-L$additional_libdir" fi fi haveit= - for x in $LDFLAGS $LTLIBGCRYPT; do + for x in $LDFLAGS $LTLIBSSL; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14616,7 +19657,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-L$additional_libdir" + LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-L$additional_libdir" fi fi fi @@ -14654,15 +19695,15 @@ fi names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$dep" - LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }$dep" + LIBSSL="${LIBSSL}${LIBSSL:+ }$dep" + LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }$dep" ;; esac done fi else - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-l$name" - LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-l$name" + LIBSSL="${LIBSSL}${LIBSSL:+ }-l$name" + LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-l$name" fi fi fi @@ -14678,27 +19719,27 @@ fi libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$flag" + LIBSSL="${LIBSSL}${LIBSSL:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$flag" + LIBSSL="${LIBSSL}${LIBSSL:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do - LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-R$found_dir" + LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-R$found_dir" done fi ac_save_CPPFLAGS="$CPPFLAGS" - for element in $INCGCRYPT; do + for element in $INCSSL; do haveit= for x in $CPPFLAGS; do @@ -14721,54 +19762,54 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libgcrypt" >&5 -$as_echo_n "checking for libgcrypt... 
" >&6; } -if ${ac_cv_libgcrypt+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libssl" >&5 +printf %s "checking for libssl... " >&6; } +if test ${ac_cv_libssl+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" - LIBS="$LIBS $LIBGCRYPT" + LIBS="$LIBS $LIBSSL" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - - #include - +#include int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_libgcrypt=yes -else - ac_cv_libgcrypt=no +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_libssl=yes +else $as_nop + ac_cv_libssl=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libgcrypt" >&5 -$as_echo "$ac_cv_libgcrypt" >&6; } - if test "$ac_cv_libgcrypt" = yes; then - HAVE_LIBGCRYPT=yes +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libssl" >&5 +printf "%s\n" "$ac_cv_libssl" >&6; } + if test "$ac_cv_libssl" = yes; then + HAVE_LIBSSL=yes -$as_echo "#define HAVE_LIBGCRYPT 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBSSL 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libgcrypt" >&5 -$as_echo_n "checking how to link with libgcrypt... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBGCRYPT" >&5 -$as_echo "$LIBGCRYPT" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libssl" >&5 +printf %s "checking how to link with libssl... 
" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBSSL" >&5 +printf "%s\n" "$LIBSSL" >&6; } else - HAVE_LIBGCRYPT=no + HAVE_LIBSSL=no CPPFLAGS="$ac_save_CPPFLAGS" - LIBGCRYPT= - LTLIBGCRYPT= - LIBGCRYPT_PREFIX= + LIBSSL= + LTLIBSSL= + LIBSSL_PREFIX= fi @@ -14778,31 +19819,50 @@ $as_echo "$LIBGCRYPT" >&6; } - if test "$ac_cv_libgcrypt" = "yes"; then + LDFLAGS="$libssh2_save_LDFLAGS" + + if test "$ac_cv_libssl" = "yes"; then : + + +printf "%s\n" "#define LIBSSH2_OPENSSL 1" >>confdefs.h -$as_echo "#define LIBSSH2_LIBGCRYPT 1" >>confdefs.h + LIBSREQUIRED="$LIBSREQUIRED${LIBSREQUIRED:+ }libssl libcrypto" + + # Not all OpenSSL have AES-CTR functions. + libssh2_save_LIBS="$LIBS" + LIBS="$LIBS $LIBSSL" + ac_fn_c_check_func "$LINENO" "EVP_aes_128_ctr" "ac_cv_func_EVP_aes_128_ctr" +if test "x$ac_cv_func_EVP_aes_128_ctr" = xyes +then : + printf "%s\n" "#define HAVE_EVP_AES_128_CTR 1" >>confdefs.h + +fi + + LIBS="$libssh2_save_LIBS" + + found_crypto="openssl" + found_crypto_str="OpenSSL (AES-CTR: ${ac_cv_func_EVP_aes_128_ctr:-N/A})" - LIBSREQUIRED= # libgcrypt doesn't provide a .pc file. sad face. - LIBS="$LIBS -lgcrypt" - found_crypto=libgcrypt else - # restore - LDFLAGS=$old_LDFLAGS - CFLAGS=$old_CFLAGS + CPPFLAGS="$libssh2_save_CPPFLAGS" fi -else - use_libgcrypt=auto + test "$found_crypto" = "none" && + crypto_errors="${crypto_errors}No openssl crypto library found! +" fi +if test "$use_crypto" = "auto" && test "$found_crypto" = "none" || test "$use_crypto" = "libgcrypt"; then -# Check whether --with-wincng was given. 
-if test "${with_wincng+set}" = set; then : - withval=$with_wincng; use_wincng=$withval + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" - # Look for Windows Cryptography API: Next Generation + if test "${with_libgcrypt_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_libgcrypt_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_libgcrypt_prefix}/lib" + fi @@ -14828,9 +19888,10 @@ if test "${with_wincng+set}" = set; then : prefix="$acl_save_prefix" -# Check whether --with-libbcrypt-prefix was given. -if test "${with_libbcrypt_prefix+set}" = set; then : - withval=$with_libbcrypt_prefix; +# Check whether --with-libgcrypt-prefix was given. +if test ${with_libgcrypt_prefix+y} +then : + withval=$with_libgcrypt_prefix; if test "X$withval" = "Xno"; then use_additional=no else @@ -14855,14 +19916,14 @@ if test "${with_libbcrypt_prefix+set}" = set; then : fi - LIBBCRYPT= - LTLIBBCRYPT= - INCBCRYPT= - LIBBCRYPT_PREFIX= + LIBGCRYPT= + LTLIBGCRYPT= + INCGCRYPT= + LIBGCRYPT_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= - names_next_round='bcrypt ' + names_next_round='gcrypt ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= @@ -14881,9 +19942,9 @@ fi if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" - test -z "$value" || LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$value" + test -z "$value" || LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$value" eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }$value" + test -z "$value" || LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }$value" else : fi @@ -14940,7 +20001,7 @@ fi fi fi if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIBBCRYPT; do + for x in $LDFLAGS $LTLIBGCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -14999,10 +20060,10 @@ fi done fi if test "X$found_dir" != "X"; then - LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-L$found_dir 
-l$name" + LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" else haveit= for x in $ltrpathdirs; do @@ -15015,10 +20076,10 @@ fi ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then @@ -15031,7 +20092,7 @@ fi fi else haveit= - for x in $LDFLAGS $LIBBCRYPT; do + for x in $LDFLAGS $LIBGCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15047,28 +20108,28 @@ fi fi done if test -z "$haveit"; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$found_dir" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_so" else - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-l$name" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_a" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$found_a" else - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$found_dir -l$name" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - LIBBCRYPT_PREFIX="$basedir" + LIBGCRYPT_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac @@ -15083,7 +20144,7 @@ fi fi fi if test 
-z "$haveit"; then - for x in $CPPFLAGS $INCBCRYPT; do + for x in $CPPFLAGS $INCGCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15100,7 +20161,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then - INCBCRYPT="${INCBCRYPT}${INCBCRYPT:+ }-I$additional_includedir" + INCGCRYPT="${INCGCRYPT}${INCGCRYPT:+ }-I$additional_includedir" fi fi fi @@ -15128,7 +20189,7 @@ fi fi if test -z "$haveit"; then haveit= - for x in $LDFLAGS $LIBBCRYPT; do + for x in $LDFLAGS $LIBGCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15145,11 +20206,11 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$additional_libdir" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-L$additional_libdir" fi fi haveit= - for x in $LDFLAGS $LTLIBBCRYPT; do + for x in $LDFLAGS $LTLIBGCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15166,7 +20227,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-L$additional_libdir" + LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-L$additional_libdir" fi fi fi @@ -15204,15 +20265,15 @@ fi names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$dep" - LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }$dep" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$dep" + LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }$dep" ;; esac done fi else - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-l$name" - LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-l$name" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }-l$name" + LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-l$name" fi fi fi @@ -15228,27 +20289,27 @@ fi libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$flag" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$flag" else for found_dir in $rpathdirs; do 
acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$flag" + LIBGCRYPT="${LIBGCRYPT}${LIBGCRYPT:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do - LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-R$found_dir" + LTLIBGCRYPT="${LTLIBGCRYPT}${LTLIBGCRYPT:+ }-R$found_dir" done fi ac_save_CPPFLAGS="$CPPFLAGS" - for element in $INCBCRYPT; do + for element in $INCGCRYPT; do haveit= for x in $CPPFLAGS; do @@ -15271,62 +20332,92 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libbcrypt" >&5 -$as_echo_n "checking for libbcrypt... " >&6; } -if ${ac_cv_libbcrypt+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libgcrypt" >&5 +printf %s "checking for libgcrypt... " >&6; } +if test ${ac_cv_libgcrypt+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" - LIBS="$LIBS $LIBBCRYPT" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - #include - #include - + LIBS="$LIBS $LIBGCRYPT" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_libbcrypt=yes -else - ac_cv_libbcrypt=no +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_libgcrypt=yes +else $as_nop + ac_cv_libgcrypt=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libbcrypt" >&5 -$as_echo "$ac_cv_libbcrypt" >&6; } - if test "$ac_cv_libbcrypt" = yes; then - HAVE_LIBBCRYPT=yes +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libgcrypt" >&5 +printf "%s\n" "$ac_cv_libgcrypt" >&6; } + if test "$ac_cv_libgcrypt" = yes; then + HAVE_LIBGCRYPT=yes -$as_echo "#define HAVE_LIBBCRYPT 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBGCRYPT 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libbcrypt" >&5 -$as_echo_n "checking how to link with libbcrypt... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBBCRYPT" >&5 -$as_echo "$LIBBCRYPT" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libgcrypt" >&5 +printf %s "checking how to link with libgcrypt... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBGCRYPT" >&5 +printf "%s\n" "$LIBGCRYPT" >&6; } else - HAVE_LIBBCRYPT=no + HAVE_LIBGCRYPT=no CPPFLAGS="$ac_save_CPPFLAGS" - LIBBCRYPT= - LTLIBBCRYPT= - LIBBCRYPT_PREFIX= + LIBGCRYPT= + LTLIBGCRYPT= + LIBGCRYPT_PREFIX= + fi + + + + + + + + + LDFLAGS="$libssh2_save_LDFLAGS" + + if test "$ac_cv_libgcrypt" = "yes"; then : + + +printf "%s\n" "#define LIBSSH2_LIBGCRYPT 1" >>confdefs.h + + found_crypto="libgcrypt" + + else + CPPFLAGS="$libssh2_save_CPPFLAGS" fi + test "$found_crypto" = "none" && + crypto_errors="${crypto_errors}No libgcrypt crypto library found! 
+" +fi +if test "$use_crypto" = "auto" && test "$found_crypto" = "none" || test "$use_crypto" = "mbedtls"; then + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" + if test "${with_libmbedcrypto_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_libmbedcrypto_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_libmbedcrypto_prefix}/lib" + fi @@ -15352,9 +20443,10 @@ $as_echo "$LIBBCRYPT" >&6; } prefix="$acl_save_prefix" -# Check whether --with-libcrypt32-prefix was given. -if test "${with_libcrypt32_prefix+set}" = set; then : - withval=$with_libcrypt32_prefix; +# Check whether --with-libmbedcrypto-prefix was given. +if test ${with_libmbedcrypto_prefix+y} +then : + withval=$with_libmbedcrypto_prefix; if test "X$withval" = "Xno"; then use_additional=no else @@ -15379,14 +20471,14 @@ if test "${with_libcrypt32_prefix+set}" = set; then : fi - LIBCRYPT32= - LTLIBCRYPT32= - INCCRYPT32= - LIBCRYPT32_PREFIX= + LIBMBEDCRYPTO= + LTLIBMBEDCRYPTO= + INCMBEDCRYPTO= + LIBMBEDCRYPTO_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= - names_next_round='crypt32 ' + names_next_round='mbedcrypto ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= @@ -15405,9 +20497,9 @@ fi if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" - test -z "$value" || LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$value" + test -z "$value" || LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$value" eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }$value" + test -z "$value" || LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }$value" else : fi @@ -15464,7 +20556,7 @@ fi fi fi if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIBCRYPT32; do + for x in $LDFLAGS $LTLIBMBEDCRYPTO; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15523,10 +20615,10 @@ fi done fi if test "X$found_dir" != "X"; then - 
LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-L$found_dir -l$name" + LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$found_so" else haveit= for x in $ltrpathdirs; do @@ -15539,10 +20631,10 @@ fi ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then @@ -15555,7 +20647,7 @@ fi fi else haveit= - for x in $LDFLAGS $LIBCRYPT32; do + for x in $LDFLAGS $LIBMBEDCRYPTO; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15571,28 +20663,28 @@ fi fi done if test -z "$haveit"; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$found_dir" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$found_so" else - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-l$name" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_a" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$found_a" else - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$found_dir -l$name" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | 
sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - LIBCRYPT32_PREFIX="$basedir" + LIBMBEDCRYPTO_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac @@ -15607,7 +20699,7 @@ fi fi fi if test -z "$haveit"; then - for x in $CPPFLAGS $INCCRYPT32; do + for x in $CPPFLAGS $INCMBEDCRYPTO; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15624,7 +20716,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then - INCCRYPT32="${INCCRYPT32}${INCCRYPT32:+ }-I$additional_includedir" + INCMBEDCRYPTO="${INCMBEDCRYPTO}${INCMBEDCRYPTO:+ }-I$additional_includedir" fi fi fi @@ -15652,7 +20744,7 @@ fi fi if test -z "$haveit"; then haveit= - for x in $LDFLAGS $LIBCRYPT32; do + for x in $LDFLAGS $LIBMBEDCRYPTO; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15669,11 +20761,11 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$additional_libdir" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }-L$additional_libdir" fi fi haveit= - for x in $LDFLAGS $LTLIBCRYPT32; do + for x in $LDFLAGS $LTLIBMBEDCRYPTO; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -15690,7 +20782,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-L$additional_libdir" + LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }-L$additional_libdir" fi fi fi @@ -15728,15 +20820,15 @@ fi names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$dep" - LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }$dep" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$dep" + LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }$dep" ;; esac done fi else - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-l$name" - LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-l$name" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }-l$name" + 
LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }-l$name" fi fi fi @@ -15752,27 +20844,27 @@ fi libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$flag" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$flag" + LIBMBEDCRYPTO="${LIBMBEDCRYPTO}${LIBMBEDCRYPTO:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do - LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-R$found_dir" + LTLIBMBEDCRYPTO="${LTLIBMBEDCRYPTO}${LTLIBMBEDCRYPTO:+ }-R$found_dir" done fi ac_save_CPPFLAGS="$CPPFLAGS" - for element in $INCCRYPT32; do + for element in $INCMBEDCRYPTO; do haveit= for x in $CPPFLAGS; do @@ -15795,55 +20887,54 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libcrypt32" >&5 -$as_echo_n "checking for libcrypt32... " >&6; } -if ${ac_cv_libcrypt32+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libmbedcrypto" >&5 +printf %s "checking for libmbedcrypto... " >&6; } +if test ${ac_cv_libmbedcrypto+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" - LIBS="$LIBS $LIBCRYPT32" + LIBS="$LIBS $LIBMBEDCRYPTO" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ - - #include - #include - +#include int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_libcrypt32=yes -else - ac_cv_libcrypt32=no +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_libmbedcrypto=yes +else $as_nop + ac_cv_libmbedcrypto=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libcrypt32" >&5 -$as_echo "$ac_cv_libcrypt32" >&6; } - if test "$ac_cv_libcrypt32" = yes; then - HAVE_LIBCRYPT32=yes +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libmbedcrypto" >&5 +printf "%s\n" "$ac_cv_libmbedcrypto" >&6; } + if test "$ac_cv_libmbedcrypto" = yes; then + HAVE_LIBMBEDCRYPTO=yes -$as_echo "#define HAVE_LIBCRYPT32 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBMBEDCRYPTO 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libcrypt32" >&5 -$as_echo_n "checking how to link with libcrypt32... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBCRYPT32" >&5 -$as_echo "$LIBCRYPT32" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libmbedcrypto" >&5 +printf %s "checking how to link with libmbedcrypto... 
" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBMBEDCRYPTO" >&5 +printf "%s\n" "$LIBMBEDCRYPTO" >&6; } else - HAVE_LIBCRYPT32=no + HAVE_LIBMBEDCRYPTO=no CPPFLAGS="$ac_save_CPPFLAGS" - LIBCRYPT32= - LTLIBCRYPT32= - LIBCRYPT32_PREFIX= + LIBMBEDCRYPTO= + LTLIBMBEDCRYPTO= + LIBMBEDCRYPTO_PREFIX= fi @@ -15852,68 +20943,145 @@ $as_echo "$LIBCRYPT32" >&6; } - for ac_header in ntdef.h ntstatus.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" " - #include -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF + LDFLAGS="$libssh2_save_LDFLAGS" -fi + if test "$ac_cv_libmbedcrypto" = "yes"; then : -done - ac_fn_c_check_decl "$LINENO" "SecureZeroMemory" "ac_cv_have_decl_SecureZeroMemory" " - #include +printf "%s\n" "#define LIBSSH2_MBEDTLS 1" >>confdefs.h + + LIBS="$LIBS -lmbedcrypto" + found_crypto="mbedtls" + support_clear_memory=yes + + else + CPPFLAGS="$libssh2_save_CPPFLAGS" + fi + + test "$found_crypto" = "none" && + crypto_errors="${crypto_errors}No mbedtls crypto library found! " -if test "x$ac_cv_have_decl_SecureZeroMemory" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC options needed to detect all undeclared functions" >&5 +printf %s "checking for $CC options needed to detect all undeclared functions... " >&6; } +if test ${ac_cv_c_undeclared_builtin_options+y} +then : + printf %s "(cached) " >&6 +else $as_nop + ac_save_CFLAGS=$CFLAGS + ac_cv_c_undeclared_builtin_options='cannot detect' + for ac_arg in '' -fno-builtin; do + CFLAGS="$ac_save_CFLAGS $ac_arg" + # This test program should *not* compile successfully. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main (void) +{ +(void) strchr; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + +else $as_nop + # This test program should compile successfully. + # No library function is consistently available on + # freestanding implementations, so test against a dummy + # declaration. Include always-available headers on the + # off chance that they somehow elicit warnings. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include +extern void ac_decl (int, char *); + +int +main (void) +{ +(void) ac_decl (0, (char *) 0); + (void) ac_decl; -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_SECUREZEROMEMORY $ac_have_decl + ; + return 0; +} _ACEOF +if ac_fn_c_try_compile "$LINENO" +then : + if test x"$ac_arg" = x +then : + ac_cv_c_undeclared_builtin_options='none needed' +else $as_nop + ac_cv_c_undeclared_builtin_options=$ac_arg +fi + break +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + done + CFLAGS=$ac_save_CFLAGS + +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_undeclared_builtin_options" >&5 +printf "%s\n" "$ac_cv_c_undeclared_builtin_options" >&6; } + case $ac_cv_c_undeclared_builtin_options in #( + 'cannot detect') : + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? 
"cannot make $CC report undeclared builtins +See \`config.log' for more details" "$LINENO" 5; } ;; #( + 'none needed') : + ac_c_undeclared_builtin_options='' ;; #( + *) : + ac_c_undeclared_builtin_options=$ac_cv_c_undeclared_builtin_options ;; +esac - if test "$ac_cv_libbcrypt" = "yes"; then +if test "$use_crypto" = "auto" && test "$found_crypto" = "none" || test "$use_crypto" = "wincng"; then -$as_echo "#define LIBSSH2_WINCNG 1" >>confdefs.h + # Look for Windows Cryptography API: Next Generation - LIBSREQUIRED= # wincng doesn't provide a .pc file. sad face. - LIBS="$LIBS -lbcrypt" - if test "$ac_cv_libcrypt32" = "yes"; then - LIBS="$LIBS -lcrypt32" - fi - found_crypto="Windows Cryptography API: Next Generation" - if test "$ac_cv_have_decl_SecureZeroMemory" = "yes"; then - support_clear_memory=yes - fi - fi + ac_fn_c_check_header_compile "$LINENO" "ntdef.h" "ac_cv_header_ntdef_h" "#include +" +if test "x$ac_cv_header_ntdef_h" = xyes +then : + printf "%s\n" "#define HAVE_NTDEF_H 1" >>confdefs.h + +fi +ac_fn_c_check_header_compile "$LINENO" "ntstatus.h" "ac_cv_header_ntstatus_h" "#include +" +if test "x$ac_cv_header_ntstatus_h" = xyes +then : + printf "%s\n" "#define HAVE_NTSTATUS_H 1" >>confdefs.h +fi -else - use_wincng=auto + ac_fn_check_decl "$LINENO" "SecureZeroMemory" "ac_cv_have_decl_SecureZeroMemory" "#include +" "$ac_c_undeclared_builtin_options" "CFLAGS" +if test "x$ac_cv_have_decl_SecureZeroMemory" = xyes +then : + ac_have_decl=1 +else $as_nop + ac_have_decl=0 fi +printf "%s\n" "#define HAVE_DECL_SECUREZEROMEMORY $ac_have_decl" >>confdefs.h -# Check whether --with-mbedtls was given. 
-if test "${with_mbedtls+set}" = set; then : - withval=$with_mbedtls; use_mbedtls=$withval + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" - old_LDFLAGS=$LDFLAGS - old_CFLAGS=$CFLAGS - if test -n "$use_mbedtls" && test "$use_mbedtls" != "no"; then - LDFLAGS="$LDFLAGS -L$use_mbedtls/lib" - CFLAGS="$CFLAGS -I$use_mbedtls/include" + if test "${with_libcrypt32_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_libcrypt32_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_libcrypt32_prefix}/lib" fi @@ -15940,9 +21108,10 @@ if test "${with_mbedtls+set}" = set; then : prefix="$acl_save_prefix" -# Check whether --with-libmbedtls-prefix was given. -if test "${with_libmbedtls_prefix+set}" = set; then : - withval=$with_libmbedtls_prefix; +# Check whether --with-libcrypt32-prefix was given. +if test ${with_libcrypt32_prefix+y} +then : + withval=$with_libcrypt32_prefix; if test "X$withval" = "Xno"; then use_additional=no else @@ -15967,14 +21136,14 @@ if test "${with_libmbedtls_prefix+set}" = set; then : fi - LIBMBEDTLS= - LTLIBMBEDTLS= - INCMBEDTLS= - LIBMBEDTLS_PREFIX= + LIBCRYPT32= + LTLIBCRYPT32= + INCCRYPT32= + LIBCRYPT32_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= - names_next_round='mbedtls ' + names_next_round='crypt32 ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= @@ -15993,9 +21162,9 @@ fi if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" - test -z "$value" || LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$value" + test -z "$value" || LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$value" eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }$value" + test -z "$value" || LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }$value" else : fi @@ -16052,7 +21221,7 @@ fi fi fi if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIBMBEDTLS; do + for x in $LDFLAGS $LTLIBCRYPT32; do 
acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16111,10 +21280,10 @@ fi done fi if test "X$found_dir" != "X"; then - LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }-L$found_dir -l$name" + LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$found_so" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" else haveit= for x in $ltrpathdirs; do @@ -16127,10 +21296,10 @@ fi ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$found_so" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$found_so" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then @@ -16143,7 +21312,7 @@ fi fi else haveit= - for x in $LDFLAGS $LIBMBEDTLS; do + for x in $LDFLAGS $LIBCRYPT32; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16159,28 +21328,28 @@ fi fi done if test -z "$haveit"; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }-L$found_dir" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$found_so" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_so" else - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }-l$name" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$found_a" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$found_a" else - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }-L$found_dir -l$name" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | 
*/$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - LIBMBEDTLS_PREFIX="$basedir" + LIBCRYPT32_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac @@ -16195,7 +21364,7 @@ fi fi fi if test -z "$haveit"; then - for x in $CPPFLAGS $INCMBEDTLS; do + for x in $CPPFLAGS $INCCRYPT32; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16212,7 +21381,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then - INCMBEDTLS="${INCMBEDTLS}${INCMBEDTLS:+ }-I$additional_includedir" + INCCRYPT32="${INCCRYPT32}${INCCRYPT32:+ }-I$additional_includedir" fi fi fi @@ -16240,7 +21409,7 @@ fi fi if test -z "$haveit"; then haveit= - for x in $LDFLAGS $LIBMBEDTLS; do + for x in $LDFLAGS $LIBCRYPT32; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16257,11 +21426,11 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }-L$additional_libdir" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-L$additional_libdir" fi fi haveit= - for x in $LDFLAGS $LTLIBMBEDTLS; do + for x in $LDFLAGS $LTLIBCRYPT32; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16278,7 +21447,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }-L$additional_libdir" + LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-L$additional_libdir" fi fi fi @@ -16316,15 +21485,15 @@ fi names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$dep" - LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }$dep" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$dep" + LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }$dep" ;; esac done fi else - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }-l$name" - LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }-l$name" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }-l$name" + 
LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-l$name" fi fi fi @@ -16340,27 +21509,27 @@ fi libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$flag" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBMBEDTLS="${LIBMBEDTLS}${LIBMBEDTLS:+ }$flag" + LIBCRYPT32="${LIBCRYPT32}${LIBCRYPT32:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do - LTLIBMBEDTLS="${LTLIBMBEDTLS}${LTLIBMBEDTLS:+ }-R$found_dir" + LTLIBCRYPT32="${LTLIBCRYPT32}${LTLIBCRYPT32:+ }-R$found_dir" done fi ac_save_CPPFLAGS="$CPPFLAGS" - for element in $INCMBEDTLS; do + for element in $INCCRYPT32; do haveit= for x in $CPPFLAGS; do @@ -16383,54 +21552,57 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libmbedtls" >&5 -$as_echo_n "checking for libmbedtls... " >&6; } -if ${ac_cv_libmbedtls+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libcrypt32" >&5 +printf %s "checking for libcrypt32... " >&6; } +if test ${ac_cv_libcrypt32+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" - LIBS="$LIBS $LIBMBEDTLS" + LIBS="$LIBS $LIBCRYPT32" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ - #include + #include + #include int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_libmbedtls=yes -else - ac_cv_libmbedtls=no +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_libcrypt32=yes +else $as_nop + ac_cv_libcrypt32=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libmbedtls" >&5 -$as_echo "$ac_cv_libmbedtls" >&6; } - if test "$ac_cv_libmbedtls" = yes; then - HAVE_LIBMBEDTLS=yes +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libcrypt32" >&5 +printf "%s\n" "$ac_cv_libcrypt32" >&6; } + if test "$ac_cv_libcrypt32" = yes; then + HAVE_LIBCRYPT32=yes -$as_echo "#define HAVE_LIBMBEDTLS 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBCRYPT32 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libmbedtls" >&5 -$as_echo_n "checking how to link with libmbedtls... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBMBEDTLS" >&5 -$as_echo "$LIBMBEDTLS" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libcrypt32" >&5 +printf %s "checking how to link with libcrypt32... 
" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBCRYPT32" >&5 +printf "%s\n" "$LIBCRYPT32" >&6; } else - HAVE_LIBMBEDTLS=no + HAVE_LIBCRYPT32=no CPPFLAGS="$ac_save_CPPFLAGS" - LIBMBEDTLS= - LTLIBMBEDTLS= - LIBMBEDTLS_PREFIX= + LIBCRYPT32= + LTLIBCRYPT32= + LIBCRYPT32_PREFIX= fi @@ -16440,39 +21612,23 @@ $as_echo "$LIBMBEDTLS" >&6; } - if test "$ac_cv_libmbedtls" = "yes"; then + LDFLAGS="$libssh2_save_LDFLAGS" -$as_echo "#define LIBSSH2_MBEDTLS 1" >>confdefs.h + if test "$ac_cv_libcrypt32" = "yes"; then : - LIBSREQUIRED= # mbedtls doesn't provide a .pc file - LIBS="$LIBS -lmbedtls -lmbedcrypto" - found_crypto=libmbedtls - support_clear_memory=yes else - # restore - LDFLAGS=$old_LDFLAGS - CFLAGS=$old_CFLAGS + CPPFLAGS="$libssh2_save_CPPFLAGS" fi -else - use_mbedtls=auto - -fi - - -# Check whether --with-libz was given. -if test "${with_libz+set}" = set; then : - withval=$with_libz; use_libz=$withval -else - use_libz=auto -fi - + libssh2_save_CPPFLAGS="$CPPFLAGS" + libssh2_save_LDFLAGS="$LDFLAGS" -support_clear_memory=no + if test "${with_libbcrypt_prefix+set}" = set; then + CPPFLAGS="$CPPFLAGS${CPPFLAGS:+ }-I${with_libbcrypt_prefix}/include" + LDFLAGS="$LDFLAGS${LDFLAGS:+ }-L${with_libbcrypt_prefix}/lib" + fi -# Look for OpenSSL -if test "$found_crypto" = "none" && test "$use_openssl" != "no"; then @@ -16497,9 +21653,10 @@ if test "$found_crypto" = "none" && test "$use_openssl" != "no"; then prefix="$acl_save_prefix" -# Check whether --with-libssl-prefix was given. -if test "${with_libssl_prefix+set}" = set; then : - withval=$with_libssl_prefix; +# Check whether --with-libbcrypt-prefix was given. 
+if test ${with_libbcrypt_prefix+y} +then : + withval=$with_libbcrypt_prefix; if test "X$withval" = "Xno"; then use_additional=no else @@ -16524,14 +21681,14 @@ if test "${with_libssl_prefix+set}" = set; then : fi - LIBSSL= - LTLIBSSL= - INCSSL= - LIBSSL_PREFIX= + LIBBCRYPT= + LTLIBBCRYPT= + INCBCRYPT= + LIBBCRYPT_PREFIX= rpathdirs= ltrpathdirs= names_already_handled= - names_next_round='ssl crypto' + names_next_round='bcrypt ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= @@ -16550,9 +21707,9 @@ fi if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" - test -z "$value" || LIBSSL="${LIBSSL}${LIBSSL:+ }$value" + test -z "$value" || LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$value" eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }$value" + test -z "$value" || LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }$value" else : fi @@ -16609,7 +21766,7 @@ fi fi fi if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIBSSL; do + for x in $LDFLAGS $LTLIBBCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16668,10 +21825,10 @@ fi done fi if test "X$found_dir" != "X"; then - LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-L$found_dir -l$name" + LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no || test "X$found_dir" = "X/usr/$acl_libdirstem"; then - LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" else haveit= for x in $ltrpathdirs; do @@ -16684,10 +21841,10 @@ fi ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then - LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" haveit= for x in 
$rpathdirs; do if test "X$x" = "X$found_dir"; then @@ -16700,7 +21857,7 @@ fi fi else haveit= - for x in $LDFLAGS $LIBSSL; do + for x in $LDFLAGS $LIBBCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16716,28 +21873,28 @@ fi fi done if test -z "$haveit"; then - LIBSSL="${LIBSSL}${LIBSSL:+ }-L$found_dir" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then - LIBSSL="${LIBSSL}${LIBSSL:+ }$found_so" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_so" else - LIBSSL="${LIBSSL}${LIBSSL:+ }-l$name" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then - LIBSSL="${LIBSSL}${LIBSSL:+ }$found_a" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$found_a" else - LIBSSL="${LIBSSL}${LIBSSL:+ }-L$found_dir -l$name" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - LIBSSL_PREFIX="$basedir" + LIBBCRYPT_PREFIX="$basedir" additional_includedir="$basedir/include" ;; esac @@ -16752,7 +21909,7 @@ fi fi fi if test -z "$haveit"; then - for x in $CPPFLAGS $INCSSL; do + for x in $CPPFLAGS $INCBCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16769,7 +21926,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then - INCSSL="${INCSSL}${INCSSL:+ }-I$additional_includedir" + INCBCRYPT="${INCBCRYPT}${INCBCRYPT:+ }-I$additional_includedir" fi fi fi @@ -16797,7 +21954,7 @@ fi fi if test -z "$haveit"; then haveit= - for x in $LDFLAGS $LIBSSL; do + for x in $LDFLAGS $LIBBCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16814,11 +21971,11 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LIBSSL="${LIBSSL}${LIBSSL:+ }-L$additional_libdir" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-L$additional_libdir" fi fi haveit= - for x in $LDFLAGS 
$LTLIBSSL; do + for x in $LDFLAGS $LTLIBBCRYPT; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" @@ -16835,7 +21992,7 @@ fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then - LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-L$additional_libdir" + LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-L$additional_libdir" fi fi fi @@ -16873,15 +22030,15 @@ fi names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) - LIBSSL="${LIBSSL}${LIBSSL:+ }$dep" - LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }$dep" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$dep" + LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }$dep" ;; esac done fi else - LIBSSL="${LIBSSL}${LIBSSL:+ }-l$name" - LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-l$name" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }-l$name" + LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-l$name" fi fi fi @@ -16897,27 +22054,27 @@ fi libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBSSL="${LIBSSL}${LIBSSL:+ }$flag" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" - LIBSSL="${LIBSSL}${LIBSSL:+ }$flag" + LIBBCRYPT="${LIBBCRYPT}${LIBBCRYPT:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do - LTLIBSSL="${LTLIBSSL}${LTLIBSSL:+ }-R$found_dir" + LTLIBBCRYPT="${LTLIBBCRYPT}${LTLIBBCRYPT:+ }-R$found_dir" done fi ac_save_CPPFLAGS="$CPPFLAGS" - for element in $INCSSL; do + for element in $INCBCRYPT; do haveit= for x in $CPPFLAGS; do @@ -16940,52 +22097,57 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libssl" >&5 -$as_echo_n "checking for libssl... " >&6; } -if ${ac_cv_libssl+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libbcrypt" >&5 +printf %s "checking for libbcrypt... 
" >&6; } +if test ${ac_cv_libbcrypt+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" - LIBS="$LIBS $LIBSSL" + LIBS="$LIBS $LIBBCRYPT" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include + + #include + #include + int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_libssl=yes -else - ac_cv_libssl=no +if ac_fn_c_try_link "$LINENO" +then : + ac_cv_libbcrypt=yes +else $as_nop + ac_cv_libbcrypt=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libssl" >&5 -$as_echo "$ac_cv_libssl" >&6; } - if test "$ac_cv_libssl" = yes; then - HAVE_LIBSSL=yes +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libbcrypt" >&5 +printf "%s\n" "$ac_cv_libbcrypt" >&6; } + if test "$ac_cv_libbcrypt" = yes; then + HAVE_LIBBCRYPT=yes -$as_echo "#define HAVE_LIBSSL 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBBCRYPT 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libssl" >&5 -$as_echo_n "checking how to link with libssl... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBSSL" >&5 -$as_echo "$LIBSSL" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libbcrypt" >&5 +printf %s "checking how to link with libbcrypt... 
" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBBCRYPT" >&5 +printf "%s\n" "$LIBBCRYPT" >&6; } else - HAVE_LIBSSL=no + HAVE_LIBBCRYPT=no CPPFLAGS="$ac_save_CPPFLAGS" - LIBSSL= - LTLIBSSL= - LIBSSL_PREFIX= + LIBBCRYPT= + LTLIBBCRYPT= + LIBBCRYPT_PREFIX= fi @@ -16994,33 +22156,49 @@ $as_echo "$LIBSSL" >&6; } -fi -if test "$ac_cv_libssl" = "yes"; then -$as_echo "#define LIBSSH2_OPENSSL 1" >>confdefs.h + LDFLAGS="$libssh2_save_LDFLAGS" - LIBSREQUIRED=libssl,libcrypto + if test "$ac_cv_libbcrypt" = "yes"; then : - # Not all OpenSSL have AES-CTR functions. - save_LIBS="$LIBS" - LIBS="$LIBS $LIBSSL" - for ac_func in EVP_aes_128_ctr -do : - ac_fn_c_check_func "$LINENO" "EVP_aes_128_ctr" "ac_cv_func_EVP_aes_128_ctr" -if test "x$ac_cv_func_EVP_aes_128_ctr" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_EVP_AES_128_CTR 1 -_ACEOF +printf "%s\n" "#define LIBSSH2_WINCNG 1" >>confdefs.h + + found_crypto="wincng" + found_crypto_str="Windows Cryptography API: Next Generation" + support_clear_memory="$ac_cv_have_decl_SecureZeroMemory" + + else + CPPFLAGS="$libssh2_save_CPPFLAGS" + fi + + + test "$found_crypto" = "none" && + crypto_errors="${crypto_errors}No wincng crypto library found! +" fi -done - LIBS="$save_LIBS" + ;; + yes|"") + crypto_errors="No crypto backend specified!" + ;; + *) + crypto_errors="Unknown crypto backend '${use_crypto}' specified!" + ;; +esac + +if test "$found_crypto" = "none"; then + crypto_errors="${crypto_errors} +Specify --with-crypto=\$backend and/or the neccessary library search prefix. 
- found_crypto="OpenSSL (AES-CTR: ${ac_cv_func_EVP_aes_128_ctr:-N/A})" +Known crypto backends: auto, openssl, libgcrypt, mbedtls, wincng" + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: ERROR: ${crypto_errors}" >&5 +printf "%s\n" "$as_me: ERROR: ${crypto_errors}" >&6;} +else + test "$found_crypto_str" = "" && found_crypto_str="$found_crypto" fi - if test "$ac_cv_libssl" = "yes"; then + if test "$found_crypto" = "openssl"; then OPENSSL_TRUE= OPENSSL_FALSE='#' else @@ -17028,15 +22206,7 @@ else OPENSSL_FALSE= fi - if test "$ac_cv_libbcrypt" = "yes"; then - WINCNG_TRUE= - WINCNG_FALSE='#' -else - WINCNG_TRUE='#' - WINCNG_FALSE= -fi - - if test "$ac_cv_libgcrypt" = "yes"; then + if test "$found_crypto" = "libgcrypt"; then LIBGCRYPT_TRUE= LIBGCRYPT_FALSE='#' else @@ -17044,7 +22214,7 @@ else LIBGCRYPT_FALSE= fi - if test "$ac_cv_libmbedtls" = "yes"; then + if test "$found_crypto" = "mbedtls"; then MBEDTLS_TRUE= MBEDTLS_FALSE='#' else @@ -17052,27 +22222,32 @@ else MBEDTLS_FALSE= fi - if false; then - OS400QC3_TRUE= - OS400QC3_FALSE='#' + if test "$found_crypto" = "wincng"; then + WINCNG_TRUE= + WINCNG_FALSE='#' else - OS400QC3_TRUE='#' - OS400QC3_FALSE= + WINCNG_TRUE='#' + WINCNG_FALSE= fi -# Check if crypto library was found -if test "$found_crypto" = "none"; then - as_fn_error $? "No crypto library found! -Try --with-libssl-prefix=PATH - or --with-libgcrypt-prefix=PATH - or --with-libmbedtls-prefix=PATH - or --with-wincng on Windows\ -" "$LINENO" 5 + +# libz + + +# Check whether --with-libz was given. +if test ${with_libz+y} +then : + withval=$with_libz; use_libz=$withval +else $as_nop + use_libz=auto fi -# Look for Libz -if test "$use_libz" != "no"; then + +found_libz=no +libz_errors="" + +if test "$use_libz" != no; then @@ -17098,7 +22273,8 @@ if test "$use_libz" != "no"; then # Check whether --with-libz-prefix was given. 
-if test "${with_libz_prefix+set}" = set; then : +if test ${with_libz_prefix+y} +then : withval=$with_libz_prefix; if test "X$withval" = "Xno"; then use_additional=no @@ -17540,11 +22716,12 @@ fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libz" >&5 -$as_echo_n "checking for libz... " >&6; } -if ${ac_cv_libz+:} false; then : - $as_echo_n "(cached) " >&6 -else + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for libz" >&5 +printf %s "checking for libz... " >&6; } +if test ${ac_cv_libz+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_save_LIBS="$LIBS" LIBS="$LIBS $LIBZ" @@ -17552,34 +22729,35 @@ else /* end confdefs.h. */ #include int -main () +main (void) { ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : ac_cv_libz=yes -else +else $as_nop ac_cv_libz=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_save_LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libz" >&5 -$as_echo "$ac_cv_libz" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_libz" >&5 +printf "%s\n" "$ac_cv_libz" >&6; } if test "$ac_cv_libz" = yes; then HAVE_LIBZ=yes -$as_echo "#define HAVE_LIBZ 1" >>confdefs.h +printf "%s\n" "#define HAVE_LIBZ 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libz" >&5 -$as_echo_n "checking how to link with libz... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBZ" >&5 -$as_echo "$LIBZ" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to link with libz" >&5 +printf %s "checking how to link with libz... 
" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIBZ" >&5 +printf "%s\n" "$LIBZ" >&6; } else HAVE_LIBZ=no CPPFLAGS="$ac_save_CPPFLAGS" @@ -17595,18 +22773,22 @@ $as_echo "$LIBZ" >&6; } if test "$ac_cv_libz" != yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: Cannot find zlib, disabling compression" >&5 -$as_echo "$as_me: Cannot find zlib, disabling compression" >&6;} - { $as_echo "$as_me:${as_lineno-$LINENO}: Try --with-libz-prefix=PATH if you know you have it" >&5 -$as_echo "$as_me: Try --with-libz-prefix=PATH if you know you have it" >&6;} + if test "$use_libz" = auto; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: Cannot find libz, disabling compression" >&5 +printf "%s\n" "$as_me: Cannot find libz, disabling compression" >&6;} + found_libz="disabled; no libz found" + else + libz_errors="No libz found! +Try --with-libz-prefix=PATH if you know that you have it." + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: ERROR: $libz_errors" >&5 +printf "%s\n" "$as_me: ERROR: $libz_errors" >&6;} + fi else -$as_echo "#define LIBSSH2_HAVE_ZLIB 1" >>confdefs.h +printf "%s\n" "#define LIBSSH2_HAVE_ZLIB 1" >>confdefs.h - if test "${LIBSREQUIRED}" != ""; then - LIBSREQUIRED="${LIBSREQUIRED}," - fi - LIBSREQUIRED="${LIBSREQUIRED}zlib" + LIBSREQUIRED="$LIBSREQUIRED${LIBSREQUIRED:+ }zlib" + found_libz="yes" fi fi @@ -17616,49 +22798,53 @@ fi # Optional Settings # # Check whether --enable-crypt-none was given. -if test "${enable_crypt_none+set}" = set; then : +if test ${enable_crypt_none+y} +then : enableval=$enable_crypt_none; -$as_echo "#define LIBSSH2_CRYPT_NONE 1" >>confdefs.h +printf "%s\n" "#define LIBSSH2_CRYPT_NONE 1" >>confdefs.h fi # Check whether --enable-mac-none was given. -if test "${enable_mac_none+set}" = set; then : +if test ${enable_mac_none+y} +then : enableval=$enable_mac_none; -$as_echo "#define LIBSSH2_MAC_NONE 1" >>confdefs.h +printf "%s\n" "#define LIBSSH2_MAC_NONE 1" >>confdefs.h fi # Check whether --enable-gex-new was given. 
-if test "${enable_gex_new+set}" = set; then : +if test ${enable_gex_new+y} +then : enableval=$enable_gex_new; GEX_NEW=$enableval fi if test "$GEX_NEW" != "no"; then -$as_echo "#define LIBSSH2_DH_GEX_NEW 1" >>confdefs.h +printf "%s\n" "#define LIBSSH2_DH_GEX_NEW 1" >>confdefs.h fi # Check whether --enable-clear-memory was given. -if test "${enable_clear_memory+set}" = set; then : +if test ${enable_clear_memory+y} +then : enableval=$enable_clear_memory; CLEAR_MEMORY=$enableval fi if test "$CLEAR_MEMORY" != "no"; then if test "$support_clear_memory" = "yes"; then -$as_echo "#define LIBSSH2_CLEAR_MEMORY 1" >>confdefs.h +printf "%s\n" "#define LIBSSH2_CLEAR_MEMORY 1" >>confdefs.h enable_clear_memory=yes else if test "$CLEAR_MEMORY" = "yes"; then as_fn_error $? "secure clearing/zeroing of memory is not supported by the selected crypto backend" "$LINENO" 5 else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&5 -$as_echo "$as_me: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&5 +printf "%s\n" "$as_me: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&2;} fi enable_clear_memory=unsupported fi @@ -17666,23 +22852,158 @@ else if test "$support_clear_memory" = "yes"; then enable_clear_memory=no else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&5 -$as_echo "$as_me: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: secure clearing/zeroing of memory is not supported by the selected crypto backend" >&5 +printf "%s\n" "$as_me: WARNING: secure clearing/zeroing of 
memory is not supported by the selected crypto backend" >&2;} enable_clear_memory=unsupported fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable pedantic and debug compiler options" >&5 -$as_echo_n "checking whether to enable pedantic and debug compiler options... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable pedantic and debug compiler options" >&5 +printf %s "checking whether to enable pedantic and debug compiler options... " >&6; } +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +printf %s "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test ${ac_cv_prog_CPP+y} +then : + printf %s "(cached) " >&6 +else $as_nop + # Double quotes because $CC needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" cpp /lib/cpp + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO" +then : + +else $as_nop + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO" +then : + # Broken: success on invalid input. +continue +else $as_nop + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok +then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +printf "%s\n" "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO" +then : + +else $as_nop + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO" +then : + # Broken: success on invalid input. +continue +else $as_nop + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok +then : + +else $as_nop + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? 
"C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + # Check whether --enable-debug was given. -if test "${enable_debug+set}" = set; then : +if test ${enable_debug+y} +then : enableval=$enable_debug; case "$enable_debug" in no) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } + CPPFLAGS="$CPPFLAGS -DNDEBUG" ;; - *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } enable_debug=yes CPPFLAGS="$CPPFLAGS -DLIBSSH2DEBUG" CFLAGS="$CFLAGS -g" @@ -17691,44 +23012,45 @@ $as_echo "yes" >&6; } if test "z$ICC" = "z"; then ICC="no" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for icc in use" >&5 -$as_echo_n "checking for icc in use... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for icc in use" >&5 +printf %s "checking for icc in use... " >&6; } if test "$GCC" = "yes"; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ __INTEL_COMPILER _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "^__INTEL_COMPILER" >/dev/null 2>&1; then : + $EGREP "^__INTEL_COMPILER" >/dev/null 2>&1 +then : ICC="no" -else +else $as_nop ICC="yes" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } fi -rm -f conftest* +rm -rf conftest* fi if test "$ICC" = "no"; then # this is not ICC - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi fi if test "$GCC" = "yes"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking gcc version" >&5 -$as_echo_n "checking gcc version... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking gcc version" >&5 +printf %s "checking gcc version... " >&6; } gccver=`$CC -dumpversion` num1=`echo $gccver | cut -d . -f1` num2=`echo $gccver | cut -d . 
-f2` gccnum=`(expr $num1 "*" 100 + $num2) 2>/dev/null` - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gccver" >&5 -$as_echo "$gccver" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $gccver" >&5 +printf "%s\n" "$gccver" >&6; } if test "$ICC" = "yes"; then @@ -17779,12 +23101,12 @@ $as_echo "$gccver" >&6; } fi CFLAGS="$CFLAGS $WARN" - { $as_echo "$as_me:${as_lineno-$LINENO}: Added this set of compiler options: $WARN" >&5 -$as_echo "$as_me: Added this set of compiler options: $WARN" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: Added this set of compiler options: $WARN" >&5 +printf "%s\n" "$as_me: Added this set of compiler options: $WARN" >&6;} else - { $as_echo "$as_me:${as_lineno-$LINENO}: Added no extra compiler options" >&5 -$as_echo "$as_me: Added no extra compiler options" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: Added no extra compiler options" >&5 +printf "%s\n" "$as_me: Added no extra compiler options" >&6;} fi NEWFLAGS="" @@ -17803,66 +23125,68 @@ $as_echo "$as_me: Added no extra compiler options" >&6;} ;; esac -else +else $as_nop enable_debug=no - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable hidden symbols in the library" >&5 -$as_echo_n "checking whether to enable hidden symbols in the library... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable hidden symbols in the library" >&5 +printf %s "checking whether to enable hidden symbols in the library... " >&6; } # Check whether --enable-hidden-symbols was given. 
-if test "${enable_hidden_symbols+set}" = set; then : +if test ${enable_hidden_symbols+y} +then : enableval=$enable_hidden_symbols; case "$enableval" in no) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } ;; *) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports it" >&5 -$as_echo_n "checking whether $CC supports it... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC supports it" >&5 +printf %s "checking whether $CC supports it... " >&6; } if test "$GCC" = yes ; then if $CC --help --verbose 2>&1 | grep fvisibility= > /dev/null ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } -$as_echo "#define LIBSSH2_API __attribute__ ((visibility (\"default\")))" >>confdefs.h +printf "%s\n" "#define LIBSSH2_API __attribute__ ((visibility (\"default\")))" >>confdefs.h CFLAGS="$CFLAGS -fvisibility=hidden" else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi else if $CC 2>&1 | grep flags >/dev/null && $CC -flags | grep xldscope= >/dev/null ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } -$as_echo "#define LIBSSH2_API __global" >>confdefs.h +printf "%s\n" "#define LIBSSH2_API __global" >>confdefs.h CFLAGS="$CFLAGS -xldscope=hidden" else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi fi ;; esac -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } +else $as_nop + { 
printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi # Build example applications? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build example applications" >&5 -$as_echo_n "checking whether to build example applications... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build example applications" >&5 +printf %s "checking whether to build example applications... " >&6; } # Check whether --enable-examples-build was given. -if test "${enable_examples_build+set}" = set; then : +if test ${enable_examples_build+y} +then : enableval=$enable_examples_build; case "$enableval" in no | false) build_examples='no' @@ -17871,12 +23195,12 @@ if test "${enable_examples_build+set}" = set; then : build_examples='yes' ;; esac -else +else $as_nop build_examples='yes' fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $build_examples" >&5 -$as_echo "$build_examples" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $build_examples" >&5 +printf "%s\n" "$build_examples" >&6; } if test "x$build_examples" != "xno"; then BUILD_EXAMPLES_TRUE= BUILD_EXAMPLES_FALSE='#' @@ -17886,61 +23210,135 @@ else fi + +# Build OSS fuzzing targets? +# Check whether --enable-ossfuzzers was given. +if test ${enable_ossfuzzers+y} +then : + enableval=$enable_ossfuzzers; have_ossfuzzers=yes +else $as_nop + have_ossfuzzers=no +fi + + if test "x$have_ossfuzzers" = "xyes"; then + USE_OSSFUZZERS_TRUE= + USE_OSSFUZZERS_FALSE='#' +else + USE_OSSFUZZERS_TRUE='#' + USE_OSSFUZZERS_FALSE= +fi + + + +# Set the correct flags for the given fuzzing engine. 
+ + if test "x$LIB_FUZZING_ENGINE" = "x-fsanitize=fuzzer"; then + USE_OSSFUZZ_FLAG_TRUE= + USE_OSSFUZZ_FLAG_FALSE='#' +else + USE_OSSFUZZ_FLAG_TRUE='#' + USE_OSSFUZZ_FLAG_FALSE= +fi + + if test -f "$LIB_FUZZING_ENGINE"; then + USE_OSSFUZZ_STATIC_TRUE= + USE_OSSFUZZ_STATIC_FALSE='#' +else + USE_OSSFUZZ_STATIC_TRUE='#' + USE_OSSFUZZ_STATIC_FALSE= +fi + + + # Checks for header files. # AC_HEADER_STDC -for ac_header in errno.h fcntl.h stdio.h stdlib.h unistd.h sys/uio.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF +ac_fn_c_check_header_compile "$LINENO" "errno.h" "ac_cv_header_errno_h" "$ac_includes_default" +if test "x$ac_cv_header_errno_h" = xyes +then : + printf "%s\n" "#define HAVE_ERRNO_H 1" >>confdefs.h fi +ac_fn_c_check_header_compile "$LINENO" "fcntl.h" "ac_cv_header_fcntl_h" "$ac_includes_default" +if test "x$ac_cv_header_fcntl_h" = xyes +then : + printf "%s\n" "#define HAVE_FCNTL_H 1" >>confdefs.h -done +fi +ac_fn_c_check_header_compile "$LINENO" "stdio.h" "ac_cv_header_stdio_h" "$ac_includes_default" +if test "x$ac_cv_header_stdio_h" = xyes +then : + printf "%s\n" "#define HAVE_STDIO_H 1" >>confdefs.h -for ac_header in sys/select.h sys/socket.h sys/ioctl.h sys/time.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF +fi +ac_fn_c_check_header_compile "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" +if test "x$ac_cv_header_stdlib_h" = xyes +then : + printf "%s\n" "#define HAVE_STDLIB_H 1" >>confdefs.h fi +ac_fn_c_check_header_compile 
"$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default" +if test "x$ac_cv_header_unistd_h" = xyes +then : + printf "%s\n" "#define HAVE_UNISTD_H 1" >>confdefs.h -done +fi +ac_fn_c_check_header_compile "$LINENO" "sys/uio.h" "ac_cv_header_sys_uio_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_uio_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_UIO_H 1" >>confdefs.h -for ac_header in arpa/inet.h netinet/in.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF +fi + +ac_fn_c_check_header_compile "$LINENO" "sys/select.h" "ac_cv_header_sys_select_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_select_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_SELECT_H 1" >>confdefs.h fi +ac_fn_c_check_header_compile "$LINENO" "sys/socket.h" "ac_cv_header_sys_socket_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_socket_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_SOCKET_H 1" >>confdefs.h -done +fi +ac_fn_c_check_header_compile "$LINENO" "sys/ioctl.h" "ac_cv_header_sys_ioctl_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_ioctl_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_IOCTL_H 1" >>confdefs.h + +fi +ac_fn_c_check_header_compile "$LINENO" "sys/time.h" "ac_cv_header_sys_time_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_time_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_TIME_H 1" >>confdefs.h + +fi -for ac_header in sys/un.h +ac_fn_c_check_header_compile "$LINENO" "arpa/inet.h" "ac_cv_header_arpa_inet_h" "$ac_includes_default" +if test "x$ac_cv_header_arpa_inet_h" = xyes +then : + printf "%s\n" "#define HAVE_ARPA_INET_H 1" >>confdefs.h + +fi +ac_fn_c_check_header_compile "$LINENO" "netinet/in.h" "ac_cv_header_netinet_in_h" "$ac_includes_default" +if test 
"x$ac_cv_header_netinet_in_h" = xyes +then : + printf "%s\n" "#define HAVE_NETINET_IN_H 1" >>confdefs.h + +fi + + for ac_header in sys/un.h do : - ac_fn_c_check_header_mongrel "$LINENO" "sys/un.h" "ac_cv_header_sys_un_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_un_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SYS_UN_H 1 -_ACEOF + ac_fn_c_check_header_compile "$LINENO" "sys/un.h" "ac_cv_header_sys_un_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_un_h" = xyes +then : + printf "%s\n" "#define HAVE_SYS_UN_H 1" >>confdefs.h have_sys_un_h=yes -else +else $as_nop have_sys_un_h=no fi done - if test "x$have_sys_un_h" = xyes; then HAVE_SYS_UN_H_TRUE= HAVE_SYS_UN_H_FALSE='#' @@ -17955,58 +23353,73 @@ case $host in # These are POSIX-like systems using BSD-like sockets API. ;; *) - for ac_header in windows.h winsock2.h ws2tcpip.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF + ac_fn_c_check_header_compile "$LINENO" "windows.h" "ac_cv_header_windows_h" "$ac_includes_default" +if test "x$ac_cv_header_windows_h" = xyes +then : + printf "%s\n" "#define HAVE_WINDOWS_H 1" >>confdefs.h fi +ac_fn_c_check_header_compile "$LINENO" "winsock2.h" "ac_cv_header_winsock2_h" "$ac_includes_default" +if test "x$ac_cv_header_winsock2_h" = xyes +then : + printf "%s\n" "#define HAVE_WINSOCK2_H 1" >>confdefs.h -done +fi +ac_fn_c_check_header_compile "$LINENO" "ws2tcpip.h" "ac_cv_header_ws2tcpip_h" "$ac_includes_default" +if test "x$ac_cv_header_ws2tcpip_h" = xyes +then : + printf "%s\n" "#define HAVE_WS2TCPIP_H 1" >>confdefs.h + +fi ;; esac case $host in *darwin*|*interix*) - { $as_echo "$as_me:${as_lineno-$LINENO}: poll use is disabled on this platform" >&5 -$as_echo "$as_me: poll use is disabled on this 
platform" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: poll use is disabled on this platform" >&5 +printf "%s\n" "$as_me: poll use is disabled on this platform" >&6;} ;; *) - for ac_func in poll -do : - ac_fn_c_check_func "$LINENO" "poll" "ac_cv_func_poll" -if test "x$ac_cv_func_poll" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_POLL 1 -_ACEOF + ac_fn_c_check_func "$LINENO" "poll" "ac_cv_func_poll" +if test "x$ac_cv_func_poll" = xyes +then : + printf "%s\n" "#define HAVE_POLL 1" >>confdefs.h fi -done ;; esac -for ac_func in gettimeofday select strtoll -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF +ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" +if test "x$ac_cv_func_gettimeofday" = xyes +then : + printf "%s\n" "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h + +fi +ac_fn_c_check_func "$LINENO" "select" "ac_cv_func_select" +if test "x$ac_cv_func_select" = xyes +then : + printf "%s\n" "#define HAVE_SELECT 1" >>confdefs.h + +fi +ac_fn_c_check_func "$LINENO" "strtoll" "ac_cv_func_strtoll" +if test "x$ac_cv_func_strtoll" = xyes +then : + printf "%s\n" "#define HAVE_STRTOLL 1" >>confdefs.h + +fi +ac_fn_c_check_func "$LINENO" "memset_s" "ac_cv_func_memset_s" +if test "x$ac_cv_func_memset_s" = xyes +then : + printf "%s\n" "#define HAVE_MEMSET_S 1" >>confdefs.h fi -done if test "$ac_cv_func_select" != "yes"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for select in ws2_32" >&5 -$as_echo_n "checking for select in ws2_32... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for select in ws2_32" >&5 +printf %s "checking for select in ws2_32... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -18018,7 +23431,7 @@ $as_echo_n "checking for select in ws2_32... 
" >&6; } #endif int -main () +main (void) { select(0,(fd_set *)NULL,(fd_set *)NULL,(fd_set *)NULL,(struct timeval *)NULL); @@ -18027,50 +23440,49 @@ main () return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +printf "%s\n" "yes" >&6; } HAVE_SELECT="1" -cat >>confdefs.h <<_ACEOF -#define HAVE_SELECT 1 -_ACEOF +printf "%s\n" "#define HAVE_SELECT 1" >>confdefs.h -else +else $as_nop - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5 +printf "%s\n" "no" >&6; } fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" -if test "x$ac_cv_type_size_t" = xyes; then : +if test "x$ac_cv_type_size_t" = xyes +then : -else +else $as_nop -cat >>confdefs.h <<_ACEOF -#define size_t unsigned int -_ACEOF +printf "%s\n" "#define size_t unsigned int" >>confdefs.h fi # The Ultrix 4.2 mips builtin alloca declared by alloca.h only works # for constant arguments. Useless! -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 -$as_echo_n "checking for working alloca.h... " >&6; } -if ${ac_cv_working_alloca_h+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 +printf %s "checking for working alloca.h... " >&6; } +if test ${ac_cv_working_alloca_h+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int -main () +main (void) { char *p = (char *) alloca (2 * sizeof (int)); if (p) return 0; @@ -18078,52 +23490,52 @@ char *p = (char *) alloca (2 * sizeof (int)); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : ac_cv_working_alloca_h=yes -else +else $as_nop ac_cv_working_alloca_h=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 -$as_echo "$ac_cv_working_alloca_h" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 +printf "%s\n" "$ac_cv_working_alloca_h" >&6; } if test $ac_cv_working_alloca_h = yes; then -$as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h +printf "%s\n" "#define HAVE_ALLOCA_H 1" >>confdefs.h fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 -$as_echo_n "checking for alloca... " >&6; } -if ${ac_cv_func_alloca_works+:} false; then : - $as_echo_n "(cached) " >&6 +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 +printf %s "checking for alloca... " >&6; } +if test ${ac_cv_func_alloca_works+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test $ac_cv_working_alloca_h = yes; then + ac_cv_func_alloca_works=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#ifdef __GNUC__ -# define alloca __builtin_alloca -#else -# ifdef _MSC_VER +#include +#include +#ifndef alloca +# ifdef __GNUC__ +# define alloca __builtin_alloca +# elif defined _MSC_VER # include # define alloca _alloca # else -# ifdef HAVE_ALLOCA_H -# include -# else -# ifdef _AIX - #pragma alloca -# else -# ifndef alloca /* predefined by HP cc +Olibcalls */ -void *alloca (size_t); -# endif -# endif +# ifdef __cplusplus +extern "C" # endif +void *alloca (size_t); # endif #endif int -main () +main (void) { char *p = (char *) alloca (1); if (p) return 0; @@ -18131,20 +23543,22 @@ char *p = (char *) alloca (1); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : ac_cv_func_alloca_works=yes -else +else $as_nop ac_cv_func_alloca_works=no fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 -$as_echo "$ac_cv_func_alloca_works" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 +printf "%s\n" "$ac_cv_func_alloca_works" >&6; } +fi if test $ac_cv_func_alloca_works = yes; then -$as_echo "#define HAVE_ALLOCA 1" >>confdefs.h +printf "%s\n" "#define HAVE_ALLOCA 1" >>confdefs.h else # The SVR3 libPW and SVR4 libucb both contain incompatible functions @@ -18154,58 +23568,19 @@ else ALLOCA=\${LIBOBJDIR}alloca.$ac_objext -$as_echo "#define C_ALLOCA 1" >>confdefs.h - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 -$as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } -if ${ac_cv_os_cray+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#if defined CRAY && ! 
defined CRAY2 -webecray -#else -wenotbecray -#endif - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "webecray" >/dev/null 2>&1; then : - ac_cv_os_cray=yes -else - ac_cv_os_cray=no -fi -rm -f conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 -$as_echo "$ac_cv_os_cray" >&6; } -if test $ac_cv_os_cray = yes; then - for ac_func in _getb67 GETB67 getb67; do - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - -cat >>confdefs.h <<_ACEOF -#define CRAY_STACKSEG_END $ac_func -_ACEOF - - break -fi +printf "%s\n" "#define C_ALLOCA 1" >>confdefs.h - done -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 -$as_echo_n "checking stack direction for C alloca... " >&6; } -if ${ac_cv_c_stack_direction+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 +printf %s "checking stack direction for C alloca... " >&6; } +if test ${ac_cv_c_stack_direction+y} +then : + printf %s "(cached) " >&6 +else $as_nop + if test "$cross_compiling" = yes +then : ac_cv_c_stack_direction=0 -else +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default @@ -18226,9 +23601,10 @@ main (int argc, char **argv) return find_stack_direction (0, argc + !argv + 20) < 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : +if ac_fn_c_try_run "$LINENO" +then : ac_cv_c_stack_direction=1 -else +else $as_nop ac_cv_c_stack_direction=-1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ @@ -18236,27 +23612,26 @@ rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 -$as_echo "$ac_cv_c_stack_direction" >&6; } -cat >>confdefs.h <<_ACEOF -#define STACK_DIRECTION $ac_cv_c_stack_direction -_ACEOF +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 +printf "%s\n" "$ac_cv_c_stack_direction" >&6; } +printf "%s\n" "#define STACK_DIRECTION $ac_cv_c_stack_direction" >>confdefs.h fi # Checks for typedefs, structures, and compiler characteristics. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 -$as_echo_n "checking for an ANSI C-conforming const... " >&6; } -if ${ac_cv_c_const+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +printf %s "checking for an ANSI C-conforming const... " >&6; } +if test ${ac_cv_c_const+y} +then : + printf %s "(cached) " >&6 +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int -main () +main (void) { #ifndef __cplusplus @@ -18269,7 +23644,7 @@ main () /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; - /* AIX XL C 1.02.0.0 rejects this. + /* IBM XL C 1.02.0.0 rejects this. 
It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ @@ -18297,7 +23672,7 @@ main () iptr p = 0; ++p; } - { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying + { /* IBM XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; @@ -18313,47 +23688,50 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_c_const=yes -else +else $as_nop ac_cv_c_const=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 -$as_echo "$ac_cv_c_const" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +printf "%s\n" "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then -$as_echo "#define const /**/" >>confdefs.h +printf "%s\n" "#define const /**/" >>confdefs.h fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 -$as_echo_n "checking for inline... " >&6; } -if ${ac_cv_c_inline+:} false; then : - $as_echo_n "(cached) " >&6 -else +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 +printf %s "checking for inline... " >&6; } +if test ${ac_cv_c_inline+y} +then : + printf %s "(cached) " >&6 +else $as_nop ac_cv_c_inline=no for ac_kw in inline __inline__ __inline; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifndef __cplusplus typedef int foo_t; -static $ac_kw foo_t static_foo () {return 0; } -$ac_kw foo_t foo () {return 0; } +static $ac_kw foo_t static_foo (void) {return 0; } +$ac_kw foo_t foo (void) {return 0; } #endif _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : ac_cv_c_inline=$ac_kw fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext test "$ac_cv_c_inline" != no && break done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 -$as_echo "$ac_cv_c_inline" >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 +printf "%s\n" "$ac_cv_c_inline" >&6; } case $ac_cv_c_inline in inline | yes) ;; @@ -18372,8 +23750,8 @@ esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking non-blocking sockets style" >&5 -$as_echo_n "checking non-blocking sockets style... " >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking non-blocking sockets style" >&5 +printf %s "checking non-blocking sockets style... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -18384,7 +23762,7 @@ $as_echo_n "checking non-blocking sockets style... 
" >&6; } #include int -main () +main (void) { /* try to compile O_NONBLOCK */ @@ -18410,14 +23788,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : nonblock="O_NONBLOCK" -$as_echo "#define HAVE_O_NONBLOCK 1" >>confdefs.h +printf "%s\n" "#define HAVE_O_NONBLOCK 1" >>confdefs.h -else +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -18428,7 +23807,7 @@ else #include int -main () +main (void) { /* FIONBIO source test (old-style unix) */ @@ -18439,14 +23818,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : nonblock="FIONBIO" -$as_echo "#define HAVE_FIONBIO 1" >>confdefs.h +printf "%s\n" "#define HAVE_FIONBIO 1" >>confdefs.h -else +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -18469,7 +23849,7 @@ else #endif int -main () +main (void) { /* ioctlsocket source code */ @@ -18482,14 +23862,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : nonblock="ioctlsocket" -$as_echo "#define HAVE_IOCTLSOCKET 1" >>confdefs.h +printf "%s\n" "#define HAVE_IOCTLSOCKET 1" >>confdefs.h -else +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -18499,7 +23880,7 @@ else #include int -main () +main (void) { /* IoctlSocket source code */ @@ -18510,14 +23891,15 @@ main () return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +if ac_fn_c_try_link "$LINENO" +then : nonblock="IoctlSocket" -$as_echo "#define HAVE_IOCTLSOCKET_CASE 1" >>confdefs.h +printf "%s\n" "#define HAVE_IOCTLSOCKET_CASE 1" >>confdefs.h -else +else $as_nop cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -18526,7 +23908,7 @@ else #include int -main () +main (void) { /* SO_NONBLOCK source code */ @@ -18538,49 +23920,98 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if ac_fn_c_try_compile "$LINENO" +then : nonblock="SO_NONBLOCK" -$as_echo "#define HAVE_SO_NONBLOCK 1" >>confdefs.h +printf "%s\n" "#define HAVE_SO_NONBLOCK 1" >>confdefs.h -else +else $as_nop nonblock="nada" -$as_echo "#define HAVE_DISABLED_NONBLOCKING 1" >>confdefs.h +printf "%s\n" "#define HAVE_DISABLED_NONBLOCKING 1" >>confdefs.h fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext \ +rm -f core conftest.err conftest.$ac_objext conftest.beam \ conftest$ac_exeext conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $nonblock" >&5 -$as_echo "$nonblock" >&6; } +rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $nonblock" >&5 +printf "%s\n" "$nonblock" >&6; } if test "$nonblock" = "nada"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: non-block sockets disabled" >&5 -$as_echo "$as_me: WARNING: non-block sockets disabled" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: non-block sockets disabled" >&5 +printf "%s\n" "$as_me: WARNING: non-block sockets disabled" >&2;} + fi + + +missing_required_deps=0 + +if test "${libz_errors}" != ""; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: ERROR: ${libz_errors}" >&5 +printf "%s\n" "$as_me: ERROR: ${libz_errors}" >&6;} + 
missing_required_deps=1 +fi + +if test "$found_crypto" = "none"; then + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: ERROR: ${crypto_errors}" >&5 +printf "%s\n" "$as_me: ERROR: ${crypto_errors}" >&6;} + missing_required_deps=1 +fi + +if test $missing_required_deps = 1; then + as_fn_error $? "Required dependencies are missing!" "$LINENO" 5 +fi + +# Configure parameters + + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable compiler warnings as errors" >&5 +printf %s "checking whether to enable compiler warnings as errors... " >&6; } + OPT_COMPILER_WERROR="default" + # Check whether --enable-werror was given. +if test ${enable_werror+y} +then : + enableval=$enable_werror; OPT_COMPILER_WERROR=$enableval +fi + + case "$OPT_COMPILER_WERROR" in + no) + want_werror="no" + ;; + default) + want_werror="no" + ;; + *) + want_werror="yes" + ;; + esac + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $want_werror" >&5 +printf "%s\n" "$want_werror" >&6; } + + if test X"$want_werror" = Xyes; then + CFLAGS="$CFLAGS -Werror" fi -ac_config_files="$ac_config_files Makefile src/Makefile tests/Makefile example/Makefile docs/Makefile libssh2.pc" +ac_config_files="$ac_config_files Makefile src/Makefile tests/Makefile tests/ossfuzz/Makefile example/Makefile docs/Makefile libssh2.pc" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -18609,8 +24040,8 @@ _ACEOF case $ac_val in #( *${as_nl}*) case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( @@ -18640,15 +24071,15 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a 
newline" >&2;} ;; /^ac_cv_env_/b end t clear :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +printf "%s\n" "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else @@ -18662,8 +24093,8 @@ $as_echo "$as_me: updating cache $cache_file" >&6;} fi fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache @@ -18680,7 +24111,7 @@ U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" @@ -18695,14 +24126,14 @@ if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 -$as_echo_n "checking that generated files are newer than configure... " >&6; } +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +printf %s "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 -$as_echo "done" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5 +printf "%s\n" "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' @@ -18723,6 +24154,10 @@ if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${SSHD_TRUE}" && test -z "${SSHD_FALSE}"; then as_fn_error $? "conditional \"SSHD\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 @@ -18732,10 +24167,6 @@ if test -z "${OPENSSL_TRUE}" && test -z "${OPENSSL_FALSE}"; then as_fn_error $? "conditional \"OPENSSL\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${WINCNG_TRUE}" && test -z "${WINCNG_FALSE}"; then - as_fn_error $? "conditional \"WINCNG\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi if test -z "${LIBGCRYPT_TRUE}" && test -z "${LIBGCRYPT_FALSE}"; then as_fn_error $? "conditional \"LIBGCRYPT\" was never defined. 
Usually this means the macro was only invoked conditionally." "$LINENO" 5 @@ -18744,14 +24175,26 @@ if test -z "${MBEDTLS_TRUE}" && test -z "${MBEDTLS_FALSE}"; then as_fn_error $? "conditional \"MBEDTLS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${OS400QC3_TRUE}" && test -z "${OS400QC3_FALSE}"; then - as_fn_error $? "conditional \"OS400QC3\" was never defined. +if test -z "${WINCNG_TRUE}" && test -z "${WINCNG_FALSE}"; then + as_fn_error $? "conditional \"WINCNG\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${BUILD_EXAMPLES_TRUE}" && test -z "${BUILD_EXAMPLES_FALSE}"; then as_fn_error $? "conditional \"BUILD_EXAMPLES\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${USE_OSSFUZZERS_TRUE}" && test -z "${USE_OSSFUZZERS_FALSE}"; then + as_fn_error $? "conditional \"USE_OSSFUZZERS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USE_OSSFUZZ_FLAG_TRUE}" && test -z "${USE_OSSFUZZ_FLAG_FALSE}"; then + as_fn_error $? "conditional \"USE_OSSFUZZ_FLAG\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USE_OSSFUZZ_STATIC_TRUE}" && test -z "${USE_OSSFUZZ_STATIC_FALSE}"; then + as_fn_error $? "conditional \"USE_OSSFUZZ_STATIC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${HAVE_SYS_UN_H_TRUE}" && test -z "${HAVE_SYS_UN_H_FALSE}"; then as_fn_error $? "conditional \"HAVE_SYS_UN_H\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 @@ -18761,8 +24204,8 @@ fi ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL @@ -18785,14 +24228,16 @@ cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : +as_nop=: +if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1 +then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST -else +else $as_nop case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( @@ -18802,46 +24247,46 @@ esac fi + +# Reset variables that may have inherited troublesome values from +# the environment. + +# IFS needs to be set, to space, tab, and newline, in precisely that order. +# (If _AS_PATH_WALK were called with IFS unset, it would have the +# side effect of setting IFS to empty, thus disabling word splitting.) +# Quoting is to prevent editors from complaining about space-tab. as_nl=' ' export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi +IFS=" "" $as_nl" + +PS1='$ ' +PS2='> ' +PS4='+ ' + +# Ensure predictable behavior from utilities with locale-dependent output. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# We cannot yet rely on "unset" to work, but we need these variables +# to be unset--not just set to an empty or harmless value--now, to +# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh). This construct +# also avoids known problems related to "unset" and subshell syntax +# in other old shells (e.g. bash 2.01 and pdksh 5.2.14). +for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH +do eval test \${$as_var+y} \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done + +# Ensure that fds 0, 1, and 2 are open. +if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi +if (exec 3>&2) ; then :; else exec 2>/dev/null; fi # The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then +if ${PATH_SEPARATOR+false} :; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || @@ -18850,13 +24295,6 @@ if test "${PATH_SEPARATOR+set}" != set; then fi -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( @@ -18865,8 +24303,12 @@ case $0 in #(( for as_dir in $PATH do IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + case $as_dir in #((( + '') as_dir=./ ;; + */) ;; + *) as_dir=$as_dir/ ;; + esac + test -r "$as_dir$0" && as_myself=$as_dir$0 && break done IFS=$as_save_IFS @@ -18878,30 +24320,10 @@ if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. 
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] @@ -18914,13 +24336,14 @@ as_fn_error () as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi - $as_echo "$as_me: error: $2" >&2 + printf "%s\n" "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error + # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. @@ -18947,18 +24370,20 @@ as_fn_unset () { eval $1=; unset $1;} } as_unset=as_fn_unset + # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null +then : eval 'as_fn_append () { eval $1+=\$2 }' -else +else $as_nop as_fn_append () { eval $1=\$$1\$2 @@ -18970,12 +24395,13 @@ fi # as_fn_append # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null +then : eval 'as_fn_arith () { as_val=$(( $* )) }' -else +else $as_nop as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` @@ -19006,7 +24432,7 @@ as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X/"$0" | +printf "%s\n" X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q @@ -19028,6 +24454,10 @@ as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits + +# Determine whether it's possible to make 'echo' print without a newline. +# These variables are no longer used directly by Autoconf, but are AC_SUBSTed +# for compatibility with existing Makefiles. ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) @@ -19041,6 +24471,12 @@ case `echo -n x` in #((((( ECHO_N='-n';; esac +# For backward compatibility with old third-party macros, we provide +# the shell variables $as_echo and $as_echo_n. New code should use +# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively. +as_echo='printf %s\n' +as_echo_n='printf %s' + rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file @@ -19082,7 +24518,7 @@ as_fn_mkdir_p () as_dirs= while :; do case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" @@ -19091,7 +24527,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | +printf "%s\n" X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -19154,7 +24590,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # values after options handling. ac_log=" This file was extended by libssh2 $as_me -, which was -generated by GNU Autoconf 2.69. Invocation command line was +generated by GNU Autoconf 2.71. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS @@ -19216,14 +24652,16 @@ $config_commands Report bugs to ." 
_ACEOF +ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"` +ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"` cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ libssh2 config.status - -configured by $0, generated by GNU Autoconf 2.69, +configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" -Copyright (C) 2012 Free Software Foundation, Inc. +Copyright (C) 2021 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." @@ -19263,15 +24701,15 @@ do -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; + printf "%s\n" "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; + printf "%s\n" "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" @@ -19279,7 +24717,7 @@ do --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; @@ -19288,7 +24726,7 @@ do as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; + printf "%s\n" "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; @@ -19316,7 +24754,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" @@ -19330,7 +24768,7 @@ exec 5>>config.log sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX - $as_echo "$ac_log" + printf "%s\n" "$ac_log" } >&5 _ACEOF @@ -19338,7 +24776,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # -AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" +AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" # The HP-UX ksh and POSIX shell print the target directory to stdout @@ -19484,6 +24922,60 @@ enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_sub enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' +postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' 
+reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO 
"$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' +inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO 
"$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' @@ -19566,7 +25058,38 @@ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ -striplib; do +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ +predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes @@ -19597,7 +25120,18 @@ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ configure_time_dlsearch_path \ -configure_time_lt_sys_library_path; do +configure_time_lt_sys_library_path \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ 
+module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes @@ -19625,6 +25159,8 @@ fi + + _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 @@ -19634,12 +25170,12 @@ for ac_config_target in $ac_config_targets do case $ac_config_target in "src/libssh2_config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/libssh2_config.h" ;; - "example/libssh2_config.h") CONFIG_HEADERS="$CONFIG_HEADERS example/libssh2_config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; + "tests/ossfuzz/Makefile") CONFIG_FILES="$CONFIG_FILES tests/ossfuzz/Makefile" ;; "example/Makefile") CONFIG_FILES="$CONFIG_FILES example/Makefile" ;; "docs/Makefile") CONFIG_FILES="$CONFIG_FILES docs/Makefile" ;; "libssh2.pc") CONFIG_FILES="$CONFIG_FILES libssh2.pc" ;; @@ -19654,9 +25190,9 @@ done # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers - test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands + test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files + test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers + test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. 
Make it in the build tree @@ -19992,7 +25528,7 @@ do esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done @@ -20000,17 +25536,17 @@ do # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +printf "%s\n" "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | + ac_sed_conf_input=`printf "%s\n" "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac @@ -20027,7 +25563,7 @@ $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | +printf "%s\n" X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -20051,9 +25587,9 @@ $as_echo X"$ac_file" | case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. 
- ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; @@ -20115,8 +25651,8 @@ ac_sed_dataroot=' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' @@ -20160,9 +25696,9 @@ test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" @@ -20178,20 +25714,20 @@ which seems to be undefined. 
Please make sure it is defined" >&2;} # if test x"$ac_file" != x-; then { - $as_echo "/* $configure_input */" \ + printf "%s\n" "/* $configure_input */" >&1 \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +printf "%s\n" "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else - $as_echo "/* $configure_input */" \ + printf "%s\n" "/* $configure_input */" >&1 \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi @@ -20211,7 +25747,7 @@ $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$_am_arg" | +printf "%s\n" X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -20231,8 +25767,8 @@ $as_echo X"$_am_arg" | s/.*/./; q'`/stamp-h$_am_stamp_count ;; - :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 -$as_echo "$as_me: executing $ac_file commands" >&6;} + :C) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +printf "%s\n" "$as_me: executing $ac_file commands" >&6;} ;; esac @@ -20242,29 +25778,35 @@ $as_echo "$as_me: executing $ac_file commands" >&6;} # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. 
- case $CONFIG_FILES in - *\'*) eval set x "$CONFIG_FILES" ;; - *) set x $CONFIG_FILES ;; - esac + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + case $CONFIG_FILES in #( + *\'*) : + eval set x "$CONFIG_FILES" ;; #( + *) : + set x $CONFIG_FILES ;; #( + *) : + ;; +esac shift - for mf + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf do # Strip MF so we end up with the name of the file. - mf=`echo "$mf" | sed -e 's/:.*$//'` - # Check whether this is an Automake generated Makefile or not. - # We used to match only the files named 'Makefile.in', but - # some people rename them; so instead we look at the file content. - # Grep'ing the first line is not enough: some people post-process - # each Makefile.in and add a new line on top of each file to say so. - # Grep'ing the whole file is not good either: AIX grep has a line + am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. - if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then - dirpart=`$as_dirname -- "$mf" || -$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$mf" : 'X\(//\)[^/]' \| \ - X"$mf" : 'X\(//\)$' \| \ - X"$mf" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$mf" | + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`$as_dirname -- "$am_mf" || +$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 
2>/dev/null || +printf "%s\n" X"$am_mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -20282,53 +25824,50 @@ $as_echo X"$mf" | q } s/.*/./; q'` - else - continue - fi - # Extract the definition of DEPDIR, am__include, and am__quote - # from the Makefile without running 'make'. - DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` - test -z "$DEPDIR" && continue - am__include=`sed -n 's/^am__include = //p' < "$mf"` - test -z "$am__include" && continue - am__quote=`sed -n 's/^am__quote = //p' < "$mf"` - # Find all dependency output files, they are included files with - # $(DEPDIR) in their names. We invoke sed twice because it is the - # simplest approach to changing $(DEPDIR) to its actual value in the - # expansion. - for file in `sed -n " - s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ - sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do - # Make sure the directory exists. - test -f "$dirpart/$file" && continue - fdir=`$as_dirname -- "$file" || -$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$file" : 'X\(//\)[^/]' \| \ - X"$file" : 'X\(//\)$' \| \ - X"$file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ + am_filepart=`$as_basename -- "$am_mf" || +$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +printf "%s\n" X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } - /^X\(\/\/\)$/{ + /^X\/\(\/\/\)$/{ s//\1/ q } - /^X\(\/\).*/{ + /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` - as_dir=$dirpart/$fdir; as_fn_mkdir_p - # echo "creating $dirpart/$file" - echo '# dummy' > "$dirpart/$file" - done + { echo "$as_me:$LINENO: cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles" >&5 + (cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } || am_rc=$? done + if test $am_rc -ne 0; then + { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE=\"gmake\" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking). +See \`config.log' for more details" "$LINENO" 5; } + fi + { am_dirpart=; unset am_dirpart;} + { am_filepart=; unset am_filepart;} + { am_mf=; unset am_mf;} + { am_rc=; unset am_rc;} + rm -f conftest-deps.mk } ;; "libtool":C) @@ -20375,7 +25914,7 @@ $as_echo X"$file" | # The names of the tagged configurations supported by this script. -available_tags='' +available_tags='CXX ' # Configured defaults for sys_lib_dlsearch_path munging. : \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} @@ -20783,6 +26322,20 @@ file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. 
+compiler_lib_search_path=$lt_compiler_lib_search_path + # ### END LIBTOOL CONFIG _LT_EOF @@ -20861,6 +26414,7 @@ _LT_EOF esac + ltmain=$ac_aux_dir/ltmain.sh @@ -20875,6 +26429,159 @@ ltmain=$ac_aux_dir/ltmain.sh (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. 
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. 
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. 
+compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + ;; esac @@ -20910,12 +26617,12 @@ if test "$no_create" != yes; then $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: summary of build options: +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: summary of build options: version: ${LIBSSH2VER} Host type: ${host} @@ -20923,14 +26630,14 @@ fi Compiler: ${CC} Compiler flags: ${CFLAGS} Library types: Shared=${enable_shared}, Static=${enable_static} - Crypto library: ${found_crypto} + Crypto library: ${found_crypto_str} Clear memory: $enable_clear_memory Debug build: $enable_debug Build examples: $build_examples Path to sshd: $ac_cv_path_SSHD (only for self-tests) - zlib compression: $ac_cv_libz + zlib compression: ${found_libz} " >&5 -$as_echo "$as_me: summary of build options: +printf "%s\n" "$as_me: summary of build options: version: ${LIBSSH2VER} Host type: ${host} @@ -20938,10 +26645,11 @@ $as_echo "$as_me: summary of build options: Compiler: ${CC} Compiler flags: ${CFLAGS} Library types: Shared=${enable_shared}, Static=${enable_static} - Crypto library: ${found_crypto} + Crypto library: ${found_crypto_str} Clear memory: $enable_clear_memory Debug build: $enable_debug Build examples: $build_examples Path to sshd: $ac_cv_path_SSHD (only for self-tests) - zlib compression: $ac_cv_libz + zlib compression: ${found_libz} " >&6;} + diff --git a/vendor/libssh2/configure.ac b/vendor/libssh2/configure.ac index c26a52b121..c4fc3e4e30 100644 --- 
a/vendor/libssh2/configure.ac +++ b/vendor/libssh2/configure.ac @@ -2,7 +2,7 @@ AC_INIT(libssh2, [-], libssh2-devel@cool.haxx.se) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_SRCDIR([src]) -AC_CONFIG_HEADERS([src/libssh2_config.h example/libssh2_config.h]) +AC_CONFIG_HEADERS([src/libssh2_config.h]) AM_MAINTAINER_MODE m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) @@ -36,12 +36,9 @@ case "$host" in CFLAGS="$CFLAGS -DLIBSSH2_WIN32" LIBS="$LIBS -lws2_32" ;; - *-cygwin) - CFLAGS="$CFLAGS -DLIBSSH2_WIN32" + *darwin*) + CFLAGS="$CFLAGS -DLIBSSH2_DARWIN" ;; - *darwin*) - CFLAGS="$CFLAGS -DLIBSSH2_DARWIN" - ;; *hpux*) ;; *osf*) @@ -69,6 +66,7 @@ AC_SEARCH_LIBS(inet_addr, nsl) AC_SUBST(LIBS) AC_PROG_CC +AC_PROG_CXX AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET @@ -83,79 +81,76 @@ AC_C_BIGENDIAN dnl check for how to do large files AC_SYS_LARGEFILE -found_crypto=none +# Crypto backends -# Configure parameters -AC_ARG_WITH(openssl, - AC_HELP_STRING([--with-openssl],[Use OpenSSL for crypto]), - use_openssl=$withval,use_openssl=auto) -AC_ARG_WITH(libgcrypt, - AC_HELP_STRING([--with-libgcrypt],[Use libgcrypt for crypto]), - [ use_libgcrypt=$withval - LIBSSH2_CHECKFOR_GCRYPT - ], use_libgcrypt=auto) -AC_ARG_WITH(wincng, - AC_HELP_STRING([--with-wincng],[Use Windows CNG for crypto]), - [ use_wincng=$withval - LIBSSH2_CHECKFOR_WINCNG - ] ,use_wincng=auto) -AC_ARG_WITH([mbedtls], - AC_HELP_STRING([--with-mbedtls],[Use mbedTLS for crypto]), - [ use_mbedtls=$withval - LIBSSH2_CHECKFOR_MBEDTLS - ], use_mbedtls=auto +found_crypto=none +found_crypto_str="" +support_clear_memory=no +crypto_errors="" + +m4_set_add([crypto_backends], [openssl]) +m4_set_add([crypto_backends], [libgcrypt]) +m4_set_add([crypto_backends], [mbedtls]) +m4_set_add([crypto_backends], [wincng]) + +AC_ARG_WITH([crypto], + AC_HELP_STRING([--with-crypto=auto|]m4_set_contents([crypto_backends], [|]), + [Select crypto backend (default: auto)]), + use_crypto=$withval, + use_crypto=auto ) -AC_ARG_WITH(libz, - 
AC_HELP_STRING([--with-libz],[Use zlib for compression]), - use_libz=$withval,use_libz=auto) -support_clear_memory=no +case "${use_crypto}" in + auto|m4_set_contents([crypto_backends], [|])) + m4_set_map([crypto_backends], [LIBSSH2_CHECK_CRYPTO]) + ;; + yes|"") + crypto_errors="No crypto backend specified!" + ;; + *) + crypto_errors="Unknown crypto backend '${use_crypto}' specified!" + ;; +esac + +if test "$found_crypto" = "none"; then + crypto_errors="${crypto_errors} +Specify --with-crypto=\$backend and/or the neccessary library search prefix. -# Look for OpenSSL -if test "$found_crypto" = "none" && test "$use_openssl" != "no"; then - AC_LIB_HAVE_LINKFLAGS([ssl], [crypto], [#include ]) +Known crypto backends: auto, m4_set_contents([crypto_backends], [, ])" + AS_MESSAGE([ERROR: ${crypto_errors}]) +else + test "$found_crypto_str" = "" && found_crypto_str="$found_crypto" fi -if test "$ac_cv_libssl" = "yes"; then - AC_DEFINE(LIBSSH2_OPENSSL, 1, [Use OpenSSL]) - LIBSREQUIRED=libssl,libcrypto - # Not all OpenSSL have AES-CTR functions. - save_LIBS="$LIBS" - LIBS="$LIBS $LIBSSL" - AC_CHECK_FUNCS(EVP_aes_128_ctr) - LIBS="$save_LIBS" +m4_set_foreach([crypto_backends], [backend], + [AM_CONDITIONAL(m4_toupper(backend), test "$found_crypto" = "backend")] +) - found_crypto="OpenSSL (AES-CTR: ${ac_cv_func_EVP_aes_128_ctr:-N/A})" -fi +# libz -AM_CONDITIONAL(OPENSSL, test "$ac_cv_libssl" = "yes") -AM_CONDITIONAL(WINCNG, test "$ac_cv_libbcrypt" = "yes") -AM_CONDITIONAL(LIBGCRYPT, test "$ac_cv_libgcrypt" = "yes") -AM_CONDITIONAL(MBEDTLS, test "$ac_cv_libmbedtls" = "yes") -AM_CONDITIONAL(OS400QC3, false) +AC_ARG_WITH([libz], + AC_HELP_STRING([--with-libz],[Use libz for compression]), + use_libz=$withval, + use_libz=auto) -# Check if crypto library was found -if test "$found_crypto" = "none"; then - AC_MSG_ERROR([No crypto library found! 
-Try --with-libssl-prefix=PATH - or --with-libgcrypt-prefix=PATH - or --with-libmbedtls-prefix=PATH - or --with-wincng on Windows\ -]) -fi +found_libz=no +libz_errors="" -# Look for Libz -if test "$use_libz" != "no"; then +if test "$use_libz" != no; then AC_LIB_HAVE_LINKFLAGS([z], [], [#include ]) if test "$ac_cv_libz" != yes; then - AC_MSG_NOTICE([Cannot find zlib, disabling compression]) - AC_MSG_NOTICE([Try --with-libz-prefix=PATH if you know you have it]) + if test "$use_libz" = auto; then + AC_MSG_NOTICE([Cannot find libz, disabling compression]) + found_libz="disabled; no libz found" + else + libz_errors="No libz found! +Try --with-libz-prefix=PATH if you know that you have it." + AS_MESSAGE([ERROR: $libz_errors]) + fi else AC_DEFINE(LIBSSH2_HAVE_ZLIB, 1, [Compile in zlib support]) - if test "${LIBSREQUIRED}" != ""; then - LIBSREQUIRED="${LIBSREQUIRED}," - fi - LIBSREQUIRED="${LIBSREQUIRED}zlib" + LIBSREQUIRED="$LIBSREQUIRED${LIBSREQUIRED:+ }zlib" + found_libz="yes" fi fi @@ -213,6 +208,7 @@ AC_HELP_STRING([--disable-debug],[Disable debug options]), [ case "$enable_debug" in no) AC_MSG_RESULT(no) + CPPFLAGS="$CPPFLAGS -DNDEBUG" ;; *) AC_MSG_RESULT(yes) enable_debug=yes @@ -284,6 +280,21 @@ esac], [build_examples='yes']) AC_MSG_RESULT($build_examples) AM_CONDITIONAL([BUILD_EXAMPLES], [test "x$build_examples" != "xno"]) + +# Build OSS fuzzing targets? +AC_ARG_ENABLE([ossfuzzers], + [AS_HELP_STRING([--enable-ossfuzzers], + [Whether to generate the fuzzers for OSS-Fuzz])], + [have_ossfuzzers=yes], [have_ossfuzzers=no]) +AM_CONDITIONAL([USE_OSSFUZZERS], [test "x$have_ossfuzzers" = "xyes"]) + + +# Set the correct flags for the given fuzzing engine. +AC_SUBST([LIB_FUZZING_ENGINE]) +AM_CONDITIONAL([USE_OSSFUZZ_FLAG], [test "x$LIB_FUZZING_ENGINE" = "x-fsanitize=fuzzer"]) +AM_CONDITIONAL([USE_OSSFUZZ_STATIC], [test -f "$LIB_FUZZING_ENGINE"]) + + # Checks for header files. 
# AC_HEADER_STDC AC_CHECK_HEADERS([errno.h fcntl.h stdio.h stdlib.h unistd.h sys/uio.h]) @@ -319,7 +330,7 @@ case $host in ;; esac -AC_CHECK_FUNCS(gettimeofday select strtoll) +AC_CHECK_FUNCS(gettimeofday select strtoll memset_s) dnl Check for select() into ws2_32 for Msys/Mingw if test "$ac_cv_func_select" != "yes"; then @@ -351,9 +362,29 @@ AC_C_INLINE CURL_CHECK_NONBLOCKING_SOCKET +missing_required_deps=0 + +if test "${libz_errors}" != ""; then + AS_MESSAGE([ERROR: ${libz_errors}]) + missing_required_deps=1 +fi + +if test "$found_crypto" = "none"; then + AS_MESSAGE([ERROR: ${crypto_errors}]) + missing_required_deps=1 +fi + +if test $missing_required_deps = 1; then + AC_MSG_ERROR([Required dependencies are missing!]) +fi + +# Configure parameters +LIBSSH2_CHECK_OPTION_WERROR + AC_CONFIG_FILES([Makefile src/Makefile tests/Makefile + tests/ossfuzz/Makefile example/Makefile docs/Makefile libssh2.pc]) @@ -367,10 +398,10 @@ AC_MSG_NOTICE([summary of build options: Compiler: ${CC} Compiler flags: ${CFLAGS} Library types: Shared=${enable_shared}, Static=${enable_static} - Crypto library: ${found_crypto} + Crypto library: ${found_crypto_str} Clear memory: $enable_clear_memory Debug build: $enable_debug Build examples: $build_examples Path to sshd: $ac_cv_path_SSHD (only for self-tests) - zlib compression: $ac_cv_libz + zlib compression: ${found_libz} ]) diff --git a/vendor/libssh2/depcomp b/vendor/libssh2/depcomp index fc98710e2a..6b391623c4 100755 --- a/vendor/libssh2/depcomp +++ b/vendor/libssh2/depcomp @@ -1,9 +1,9 @@ #! /bin/sh # depcomp - compile a program generating dependencies as side-effects -scriptversion=2013-05-30.07; # UTC +scriptversion=2018-03-07.03; # UTC -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2020 Free Software Foundation, Inc. 
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -16,7 +16,7 @@ scriptversion=2013-05-30.07; # UTC # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -783,9 +783,9 @@ exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" +# time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: diff --git a/vendor/libssh2/docs/CMakeLists.txt b/vendor/libssh2/docs/CMakeLists.txt index 3e9d165ef8..b69ccced5c 100644 --- a/vendor/libssh2/docs/CMakeLists.txt +++ b/vendor/libssh2/docs/CMakeLists.txt @@ -38,8 +38,10 @@ set(MAN_PAGES libssh2_agent_disconnect.3 libssh2_agent_free.3 libssh2_agent_get_identity.3 + libssh2_agent_get_identity_path.3 libssh2_agent_init.3 libssh2_agent_list_identities.3 + libssh2_agent_set_identity_path.3 libssh2_agent_userauth.3 libssh2_banner_set.3 libssh2_base64_decode.3 @@ -134,6 +136,7 @@ set(MAN_PAGES libssh2_session_free.3 libssh2_session_get_blocking.3 libssh2_session_get_timeout.3 + libssh2_session_handshake.3 libssh2_session_hostkey.3 libssh2_session_init.3 libssh2_session_init_ex.3 @@ -190,6 +193,7 @@ set(MAN_PAGES libssh2_trace.3 libssh2_trace_sethandler.3 libssh2_userauth_authenticated.3 + libssh2_userauth_banner.3 libssh2_userauth_hostbased_fromfile.3 libssh2_userauth_hostbased_fromfile_ex.3 libssh2_userauth_keyboard_interactive.3 @@ -200,6 +204,7 @@ set(MAN_PAGES libssh2_userauth_publickey.3 libssh2_userauth_publickey_fromfile.3 
libssh2_userauth_publickey_fromfile_ex.3 + libssh2_userauth_publickey_frommemory.3 libssh2_version.3) include(GNUInstallDirs) diff --git a/vendor/libssh2/docs/HACKING.CRYPTO b/vendor/libssh2/docs/HACKING-CRYPTO similarity index 52% rename from vendor/libssh2/docs/HACKING.CRYPTO rename to vendor/libssh2/docs/HACKING-CRYPTO index a8a6a0618b..85d813aa62 100644 --- a/vendor/libssh2/docs/HACKING.CRYPTO +++ b/vendor/libssh2/docs/HACKING-CRYPTO @@ -1,4 +1,4 @@ - Definitions needed to implement a specific crypto library + Definitions needed to implement a specific crypto library This document offers some hints about implementing a new crypto library interface. @@ -13,6 +13,38 @@ Procedures listed as "void" may indeed have a result type: the void indication indicates the libssh2 core modules never use the function result. +0) Build system. + +Adding a crypto backend to the autotools build system (./configure) is easy: + +0.1) Add one new line in configure.ac + +m4_set_add([crypto_backends], [newname]) + +This automatically creates a --with-crypto=newname option. + +0.2) Add an m4_case stanza to LIBSSH2_CRYPTO_CHECK in acinclude.m4 + +This must check for all required libraries, and if found set and AC_SUBST a +variable with the library linking flags. The recommended method is to use +LIBSSH2_LIB_HAVE_LINKFLAGS from LIBSSH2_CRYPTO_CHECK, which automatically +creates and handles a --with-$newname-prefix option and sets an +LTLIBNEWNAME variable on success. + +0.3) Create Makefile.newname.inc in the top-level directory + +This must set CRYPTO_CSOURCES, CRYPTO_HHEADERS and CRYPTO_LTLIBS. +Set CRYPTO_CSOURCES and CRYPTO_HHEADERS to the new backend source files +and set CRYPTO_LTLIBS to the required library linking parameters, e.g. +$(LTLIBNEWNAME) as generated by by LIBSSH2_LIB_HAVE_LINKFLAGS. + +0.4) Add a new block in src/Makefile.am + +if NEWNAME +include ../Makefile.newname.inc +endif + + 1) Crypto library initialization/termination. 
void libssh2_crypto_init(void); @@ -35,14 +67,14 @@ Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_hmac_update(libssh2_hmac_ctx ctx, - const unsigned char *data, - int datalen); + const unsigned char *data, + int datalen); Continue computation of an HMAC on datalen bytes at data using context ctx. Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_hmac_final(libssh2_hmac_ctx ctx, - unsigned char output[]); + unsigned char output[]); Get the computed HMAC from context ctx into the output buffer. The minimum data buffer size depends on the HMAC hash algorithm. Note: if the ctx parameter is modified by the underlying code, @@ -61,21 +93,21 @@ SHA_DIGEST_LENGTH #define to 20, the SHA-1 digest length. libssh2_sha1_ctx -Type of an SHA1 computation context. Generally a struct. +Type of an SHA-1 computation context. Generally a struct. int libssh2_sha1_init(libssh2_sha1_ctx *x); Initializes the SHA-1 computation context at x. Returns 1 for success and 0 for failure void libssh2_sha1_update(libssh2_sha1_ctx ctx, - const unsigned char *data, - size_t len); + const unsigned char *data, + size_t len); Continue computation of SHA-1 on len bytes at data using context ctx. Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_sha1_final(libssh2_sha1_ctx ctx, - unsigned char output[SHA1_DIGEST_LEN]); + unsigned char output[SHA_DIGEST_LEN]); Get the computed SHA-1 signature from context ctx and store it into the output buffer. Release the context. @@ -83,8 +115,8 @@ Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. 
void libssh2_hmac_sha1_init(libssh2_hmac_ctx *ctx, - const void *key, - int keylen); + const void *key, + int keylen); Setup the HMAC computation context ctx for an HMAC-SHA-1 computation using the keylen-byte key. Is invoked just after libssh2_hmac_ctx_init(). @@ -102,14 +134,14 @@ Initializes the SHA-256 computation context at x. Returns 1 for success and 0 for failure void libssh2_sha256_update(libssh2_sha256_ctx ctx, - const unsigned char *data, - size_t len); + const unsigned char *data, + size_t len); Continue computation of SHA-256 on len bytes at data using context ctx. Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_sha256_final(libssh2_sha256_ctx ctx, - unsigned char output[SHA256_DIGEST_LENGTH]); + unsigned char output[SHA256_DIGEST_LENGTH]); Gets the computed SHA-256 signature from context ctx into the output buffer. Release the context. Note: if the ctx parameter is modified by the underlying code, @@ -128,26 +160,91 @@ LIBSSH2_HMAC_SHA256 If defined as 0, the rest of this section can be omitted. void libssh2_hmac_sha256_init(libssh2_hmac_ctx *ctx, - const void *key, - int keylen); + const void *key, + int keylen); Setup the HMAC computation context ctx for an HMAC-256 computation using the keylen-byte key. Is invoked just after libssh2_hmac_ctx_init(). -3.3) SHA-512 -LIBSSH2_HMAC_SHA512 -#define as 1 if the crypto library supports HMAC-SHA-512, else 0. -If defined as 0, the rest of this section can be omitted. +3.3) SHA-384 +Mandatory if ECDSA is implemented. Can be omitted otherwise. + +SHA384_DIGEST_LENGTH +#define to 48, the SHA-384 digest length. + +libssh2_sha384_ctx +Type of an SHA-384 computation context. Generally a struct. + +int libssh2_sha384_init(libssh2_sha384_ctx *x); +Initializes the SHA-384 computation context at x. 
+Returns 1 for success and 0 for failure + +void libssh2_sha384_update(libssh2_sha384_ctx ctx, + const unsigned char *data, + size_t len); +Continue computation of SHA-384 on len bytes at data using context ctx. +Note: if the ctx parameter is modified by the underlying code, +this procedure must be implemented as a macro to map ctx --> &ctx. + +void libssh2_sha384_final(libssh2_sha384_ctx ctx, + unsigned char output[SHA384_DIGEST_LENGTH]); +Gets the computed SHA-384 signature from context ctx into the output buffer. +Release the context. +Note: if the ctx parameter is modified by the underlying code, +this procedure must be implemented as a macro to map ctx --> &ctx. + +int libssh2_sha384(const unsigned char *message, + unsigned long len, + unsigned char output[SHA384_DIGEST_LENGTH]); +Computes the SHA-384 signature over the given message of length len and +store the result into the output buffer. +Return 1 if error, else 0. + +3.4) SHA-512 +Must always be implemented. SHA512_DIGEST_LENGTH #define to 64, the SHA-512 digest length. +libssh2_sha512_ctx +Type of an SHA-512 computation context. Generally a struct. + +int libssh2_sha512_init(libssh2_sha512_ctx *x); +Initializes the SHA-512 computation context at x. +Returns 1 for success and 0 for failure + +void libssh2_sha512_update(libssh2_sha512_ctx ctx, + const unsigned char *data, + size_t len); +Continue computation of SHA-512 on len bytes at data using context ctx. +Note: if the ctx parameter is modified by the underlying code, +this procedure must be implemented as a macro to map ctx --> &ctx. + +void libssh2_sha512_final(libssh2_sha512_ctx ctx, + unsigned char output[SHA512_DIGEST_LENGTH]); +Gets the computed SHA-512 signature from context ctx into the output buffer. +Release the context. +Note: if the ctx parameter is modified by the underlying code, +this procedure must be implemented as a macro to map ctx --> &ctx. 
+ +int libssh2_sha512(const unsigned char *message, + unsigned long len, + unsigned char output[SHA512_DIGEST_LENGTH]); +Computes the SHA-512 signature over the given message of length len and +store the result into the output buffer. +Return 1 if error, else 0. +Note: Seems unused in current code, but defined in each crypto library backend. + +LIBSSH2_HMAC_SHA512 +#define as 1 if the crypto library supports HMAC-SHA-512, else 0. +If defined as 0, the rest of this section can be omitted. + void libssh2_hmac_sha512_init(libssh2_hmac_ctx *ctx, - const void *key, - int keylen); + const void *key, + int keylen); Setup the HMAC computation context ctx for an HMAC-512 computation using the keylen-byte key. Is invoked just after libssh2_hmac_ctx_init(). -3.4) MD5 +3.5) MD5 LIBSSH2_MD5 #define to 1 if the crypto library supports MD5, else 0. If defined as 0, the rest of this section can be omitted. @@ -163,40 +260,40 @@ Initializes the MD5 computation context at x. Returns 1 for success and 0 for failure void libssh2_md5_update(libssh2_md5_ctx ctx, - const unsigned char *data, - size_t len); + const unsigned char *data, + size_t len); Continues computation of MD5 on len bytes at data using context ctx. Returns 1 for success and 0 for failure. Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_md5_final(libssh2_md5_ctx ctx, - unsigned char output[MD5_DIGEST_LENGTH]); + unsigned char output[MD5_DIGEST_LENGTH]); Gets the computed MD5 signature from context ctx into the output buffer. Release the context. Note: if the ctx parameter is modified by the underlying code, this procedure must be implemented as a macro to map ctx --> &ctx. void libssh2_hmac_md5_init(libssh2_hmac_ctx *ctx, - const void *key, - int keylen); + const void *key, + int keylen); Setup the HMAC computation context ctx for an HMAC-MD5 computation using the keylen-byte key. Is invoked just after libssh2_hmac_ctx_init(). 
-3.5) RIPEMD-160 +3.6) RIPEMD-160 LIBSSH2_HMAC_RIPEMD #define as 1 if the crypto library supports HMAC-RIPEMD-160, else 0. If defined as 0, the rest of this section can be omitted. void libssh2_hmac_ripemd160_init(libssh2_hmac_ctx *ctx, - const void *key, - int keylen); + const void *key, + int keylen); Setup the HMAC computation context ctx for an HMAC-RIPEMD-160 computation using the keylen-byte key. Is invoked just after libssh2_hmac_ctx_init(). Returns 1 for success and 0 for failure. -4) Bidirectional Key ciphers. +4) Bidirectional key ciphers. _libssh2_cipher_ctx Type of a cipher computation context. @@ -252,10 +349,6 @@ LIBSSH2_AES_CTR #define as 1 if the crypto library supports AES in CTR mode, else 0. If defined as 0, the rest of this section can be omitted. -void _libssh2_init_aes_ctr(void); -Initialize static AES CTR ciphers. -This procedure is already prototyped in crypto.h. - _libssh2_cipher_aes128ctr AES-128-CTR algorithm identifier initializer. #define with constant value of type _libssh2_cipher_type(). @@ -305,10 +398,42 @@ TripleDES-CBC algorithm identifier initializer. #define with constant value of type _libssh2_cipher_type(). -5) Big numbers. +5) Diffie-Hellman support. + +5.1) Diffie-Hellman context. +_libssh2_dh_ctx +Type of a Diffie-Hellman computation context. +Must always be defined. + +5.2) Diffie-Hellman computation procedures. +void libssh2_dh_init(_libssh2_dh_ctx *dhctx); +Initializes the Diffie-Hellman context at `dhctx'. No effective context +creation needed here. + +int libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order, + _libssh2_bn_ctx *bnctx); +Generates a Diffie-Hellman key pair using base `g', prime `p' and the given +`group_order'. Can use the given big number context `bnctx' if needed. +The private key is stored as opaque in the Diffie-Hellman context `*dhctx' and +the public key is returned in `public'. +0 is returned upon success, else -1. 
+ +int libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p, _libssh2_bn_ctx * bnctx) +Computes the Diffie-Hellman secret from the previously created context `*dhctx', +the public key `f' from the other party and the same prime `p' used at +context creation. The result is stored in `secret'. +0 is returned upon success, else -1. + +void libssh2_dh_dtor(_libssh2_dh_ctx *dhctx) +Destroys Diffie-Hellman context at `dhctx' and resets its storage. + + +6) Big numbers. Positive multi-byte integers support is sufficient. -5.1) Computation contexts. +6.1) Computation contexts. This has a real meaning if the big numbers computations need some context storage. If not, use a dummy type and functions (macros). @@ -316,13 +441,13 @@ _libssh2_bn_ctx Type of multiple precision computation context. May not be empty. if not used, #define as char, for example. -libssh2_bn_ctx _libssh2_bn_ctx_new(void); +_libssh2_bn_ctx _libssh2_bn_ctx_new(void); Returns a new multiple precision computation context. void _libssh2_bn_ctx_free(_libssh2_bn_ctx ctx); Releases a multiple precision computation context. -5.2) Computation support. +6.2) Computation support. _libssh2_bn Type of multiple precision numbers (aka bignumbers or huge integers) for the crypto library. @@ -339,7 +464,7 @@ allocates the number. Returns a value of type _libssh2_bn *. void _libssh2_bn_free(_libssh2_bn *bn); Destroys the multiple precision number at bn. -unsigned long _libssh2_bn_bytes(libssh2_bn *bn); +unsigned long _libssh2_bn_bytes(_libssh2_bn *bn); Get the number of bytes needed to store the bits of the multiple precision number at bn. @@ -351,7 +476,7 @@ Sets the value of bn to val. Returns 1 on success, 0 otherwise. _libssh2_bn * _libssh2_bn_from_bin(_libssh2_bn *bn, int len, - const unsigned char *val); + const unsigned char *val); Converts the positive integer in big-endian form of length len at val into a _libssh2_bn and place it in bn. 
If bn is NULL, a new _libssh2_bn is created. @@ -362,22 +487,8 @@ Converts the absolute value of bn into big-endian form and store it at val. val must point to _libssh2_bn_bytes(bn) bytes of memory. Returns the length of the big-endian number. -void _libssh2_bn_rand(_libssh2_bn *bn, int bits, int top, int bottom); -Generates a cryptographically strong pseudo-random number of bits in -length and stores it in bn. If top is -1, the most significant bit of the -random number can be zero. If top is 0, it is set to 1, and if top is 1, the -two most significant bits of the number will be set to 1, so that the product -of two such random numbers will always have 2*bits length. If bottom is true, -the number will be odd. - -void _libssh2_bn_mod_exp(_libssh2_bn *r, _libssh2_bn *a, - _libssh2_bn *p, _libssh2_bn *m, - _libssh2_bn_ctx *ctx); -Computes a to the p-th power modulo m and stores the result into r (r=a^p % m). -May use the given context. - -6) Private key algorithms. +7) Private key algorithms. Format of an RSA public key: a) "ssh-rsa". b) RSA exponent, MSB first, with high order bit = 0. @@ -392,6 +503,17 @@ d) g, MSB first, with high order bit = 0. e) pub_key, MSB first, with high order bit = 0. Each item is preceded by its 32-bit byte length, MSB first. +Format of an ECDSA public key: +a) "ecdsa-sha2-nistp256" or "ecdsa-sha2-nistp384" or "ecdsa-sha2-nistp521". +b) domain: "nistp256", "nistp384" or "nistp521" matching a). +c) raw public key ("octal"). +Each item is preceded by its 32-bit byte length, MSB first. + +Format of an ED25519 public key: +a) "ssh-ed25519". +b) raw key (32 bytes). +Each item is preceded by its 32-bit byte length, MSB first. + int _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, unsigned char **method, size_t *method_len, @@ -421,7 +543,8 @@ Both buffers have to be allocated using LIBSSH2_ALLOC(). Returns 0 if OK, else -1. This procedure is already prototyped in crypto.h. 
-6.1) RSA + +7.1) RSA LIBSSH2_RSA #define as 1 if the crypto library supports RSA, else 0. If defined as 0, the rest of this section can be omitted. @@ -446,14 +569,14 @@ int _libssh2_rsa_new(libssh2_rsa_ctx **rsa, unsigned long e2len, const unsigned char *coeffdata, unsigned long coefflen); Creates a new context for RSA computations from key source values: - pdata, plen Prime number p. Only used if private key known (ddata). - qdata, qlen Prime number q. Only used if private key known (ddata). - ndata, nlen Modulus n. - edata, elen Exponent e. - ddata, dlen e^-1 % phi(n) = private key. May be NULL if unknown. - e1data, e1len dp = d % (p-1). Only used if private key known (dtata). - e2data, e2len dq = d % (q-1). Only used if private key known (dtata). - coeffdata, coefflen q^-1 % p. Only used if private key known. + pdata, plen Prime number p. Only used if private key known (ddata). + qdata, qlen Prime number q. Only used if private key known (ddata). + ndata, nlen Modulus n. + edata, elen Exponent e. + ddata, dlen e^-1 % phi(n) = private key. May be NULL if unknown. + e1data, e1len dp = d % (p-1). Only used if private key known (dtata). + e2data, e2len dq = d % (q-1). Only used if private key known (dtata). + coeffdata, coefflen q^-1 % p. Only used if private key known. Returns 0 if OK. This procedure is already prototyped in crypto.h. Note: the current generic code only calls this function with e and n (public @@ -472,7 +595,7 @@ This procedure is already prototyped in crypto.h. int _libssh2_rsa_new_private_frommemory(libssh2_rsa_ctx **rsa, LIBSSH2_SESSION *session, const char *data, - size_t data_len, + size_t data_len, unsigned const char *passphrase); Gets an RSA private key from data into a new RSA context. Must call _libssh2_init_if_needed(). 
@@ -483,15 +606,15 @@ int _libssh2_rsa_sha1_verify(libssh2_rsa_ctx *rsa, const unsigned char *sig, unsigned long sig_len, const unsigned char *m, unsigned long m_len); -Verify (sig, siglen) signature of (m, m_len) using an SHA-1 hash and the +Verify (sig, sig_len) signature of (m, m_len) using an SHA-1 hash and the RSA context. Return 0 if OK, else -1. This procedure is already prototyped in crypto.h. int _libssh2_rsa_sha1_signv(LIBSSH2_SESSION *session, - unsigned char **sig, size_t *siglen, - int count, const struct iovec vector[], - libssh2_rsa_ctx *ctx); + unsigned char **sig, size_t *siglen, + int count, const struct iovec vector[], + libssh2_rsa_ctx *ctx); RSA signs the SHA-1 hash computed over the count data chunks in vector. Signature is stored at (sig, siglen). Signature buffer must be allocated from the given session. @@ -514,8 +637,34 @@ Note: this procedure is not used if macro _libssh2_rsa_sha1_signv() is defined. void _libssh2_rsa_free(libssh2_rsa_ctx *rsactx); Releases the RSA computation context at rsactx. +LIBSSH2_RSA_SHA2 +#define as 1 if the crypto library supports RSA SHA2 256/512, else 0. +If defined as 0, the rest of this section can be omitted. + +int _libssh2_rsa_sha2_sign(LIBSSH2_SESSION * session, + libssh2_rsa_ctx * rsactx, + const unsigned char *hash, + size_t hash_len, + unsigned char **signature, + size_t *signature_len); +RSA signs the (hash, hashlen) SHA-2 hash bytes based on hash length and stores +the allocated signature at (signature, signature_len). +Signature buffer must be allocated from the given session. +Returns 0 if OK, else -1. +This procedure is already prototyped in crypto.h. +Note: this procedure is not used if macro _libssh2_rsa_sha1_signv() is defined. 
+ +int _libssh2_rsa_sha2_verify(libssh2_rsa_ctx * rsa, + size_t hash_len, + const unsigned char *sig, + unsigned long sig_len, + const unsigned char *m, unsigned long m_len); +Verify (sig, sig_len) signature of (m, m_len) using an SHA-2 hash based on +hash length and the RSA context. +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. -6.2) DSA +7.2) DSA LIBSSH2_DSA #define as 1 if the crypto library supports DSA, else 0. If defined as 0, the rest of this section can be omitted. @@ -535,11 +684,11 @@ int _libssh2_dsa_new(libssh2_dsa_ctx **dsa, unsigned long ylen, const unsigned char *x, unsigned long x_len); Creates a new context for DSA computations from source key values: - pdata, plen Prime number p. Only used if private key known (ddata). - qdata, qlen Prime number q. Only used if private key known (ddata). - gdata, glen G number. - ydata, ylen Public key. - xdata, xlen Private key. Only taken if xlen non-zero. + pdata, plen Prime number p. Only used if private key known (ddata). + qdata, qlen Prime number q. Only used if private key known (ddata). + gdata, glen G number. + ydata, ylen Public key. + xdata, xlen Private key. Only taken if xlen non-zero. Returns 0 if OK. This procedure is already prototyped in crypto.h. @@ -555,7 +704,7 @@ This procedure is already prototyped in crypto.h. int _libssh2_dsa_new_private_frommemory(libssh2_dsa_ctx **dsa, LIBSSH2_SESSION *session, const char *data, - size_t data_len, + size_t data_len, unsigned const char *passphrase); Gets a DSA private key from the data_len-bytes data into a new DSA context. Must call _libssh2_init_if_needed(). @@ -565,7 +714,7 @@ This procedure is already prototyped in crypto.h. int _libssh2_dsa_sha1_verify(libssh2_dsa_ctx *dsactx, const unsigned char *sig, const unsigned char *m, unsigned long m_len); -Verify (sig, siglen) signature of (m, m_len) using an SHA1 hash and the +Verify (sig, siglen) signature of (m, m_len) using an SHA-1 hash and the DSA context. 
Returns 0 if OK, else -1. This procedure is already prototyped in crypto.h. @@ -581,7 +730,192 @@ void _libssh2_dsa_free(libssh2_dsa_ctx *dsactx); Releases the DSA computation context at dsactx. -7) Miscellaneous +7.3) ECDSA +LIBSSH2_ECDSA +#define as 1 if the crypto library supports ECDSA, else 0. +If defined as 0, _libssh2_ec_key should be defined as void and the rest of +this section can be omitted. + +EC_MAX_POINT_LEN +Maximum point length. Usually defined as ((528 * 2 / 8) + 1) (= 133). + +libssh2_ecdsa_ctx +Type of an ECDSA computation context. Generally a struct. + +_libssh2_ec_key +Type of an elliptic curve key. + +libssh2_curve_type +An enum type defining curve types. Current supported identifiers are: + LIBSSH2_EC_CURVE_NISTP256 + LIBSSH2_EC_CURVE_NISTP384 + LIBSSH2_EC_CURVE_NISTP521 + +int _libssh2_ecdsa_create_key(_libssh2_ec_key **out_private_key, + unsigned char **out_public_key_octal, + size_t *out_public_key_octal_len, + libssh2_curve_type curve_type); +Create a new ECDSA private key of type curve_type and return it at +out_private_key. If out_public_key_octal is not NULL, store an allocated +pointer to the associated public key in "octal" form in it and its length +at out_public_key_octal_len. +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_new_private(libssh2_ecdsa_ctx **ec_ctx, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase); +Reads an ECDSA private key from PEM file filename into a new ECDSA context. +Must call _libssh2_init_if_needed(). +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_new_private_frommemory(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase); +Builds an ECDSA private key from PEM data at filedata of length filedata_len +into a new ECDSA context stored at ec_ctx. +Must call _libssh2_init_if_needed(). 
+Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_curve_name_with_octal_new(libssh2_ecdsa_ctx **ecdsactx, + const unsigned char *k, + size_t k_len, + libssh2_curve_type type); +Stores at ecdsactx a new ECDSA context associated with the given curve type +and with "octal" form public key (k, k_len). +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_new_openssh_private(libssh2_ecdsa_ctx **ec_ctx, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase); +Reads a PEM-encoded ECDSA private key from file filename encrypted with +passphrase and stores at ec_ctx a new ECDSA context for it. +Return 0 if OK, else -1. +Currently used only from openssl backend (ought to be private). +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_sign(LIBSSH2_SESSION *session, libssh2_ecdsa_ctx *ec_ctx, + const unsigned char *hash, unsigned long hash_len, + unsigned char **signature, size_t *signature_len); +ECDSA signs the (hash, hashlen) hash bytes and stores the allocated +signature at (signature, signature_len). Hash algorithm used should be +SHA-256, SHA-384 or SHA-512 depending on type stored in ECDSA context at ec_ctx. +Signature buffer must be allocated from the given session. +Returns 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ecdsa_verify(libssh2_ecdsa_ctx *ctx, + const unsigned char *r, size_t r_len, + const unsigned char *s, size_t s_len, + const unsigned char *m, size_t m_len); +Verify the ECDSA signature made of (r, r_len) and (s, s_len) of (m, m_len) +using the hash algorithm configured in the ECDSA context ctx. +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +libssh2_curve_type _libssh2_ecdsa_get_curve_type(libssh2_ecdsa_ctx *ecdsactx); +Returns the curve type associated with given context. +This procedure is already prototyped in crypto.h. 
+
+int _libssh2_ecdsa_curve_type_from_name(const char *name,
+                                        libssh2_curve_type *out_type);
+Stores in out_type the curve type matching string name of the form
+"ecdsa-sha2-nistpxxx".
+Return 0 if OK, else -1.
+Currently used only from openssl backend (ought to be private).
+This procedure is already prototyped in crypto.h.
+
+void _libssh2_ecdsa_free(libssh2_ecdsa_ctx *ecdsactx);
+Releases the ECDSA computation context at ecdsactx.
+
+
+7.4) ED25519
+LIBSSH2_ED25519
+#define as 1 if the crypto library supports ED25519, else 0.
+If defined as 0, the rest of this section can be omitted.
+
+
+libssh2_ed25519_ctx
+Type of an ED25519 computation context. Generally a struct.
+
+int _libssh2_curve25519_new(LIBSSH2_SESSION *session, libssh2_ed25519_ctx **ctx,
+                            uint8_t **out_public_key,
+                            uint8_t **out_private_key);
+Generates an ED25519 key pair, stores a pointer to them at out_private_key
+and out_public_key respectively and stores at ctx a new ED25519 context for
+this key.
+Arguments ctx, out_private_key and out_public_key may be NULL to disable storing
+the corresponding value.
+Length of each key is LIBSSH2_ED25519_KEY_LEN (32 bytes).
+Key buffers are allocated and should be released by caller after use.
+Returns 0 if OK, else -1.
+This procedure is already prototyped in crypto.h.
+
+int _libssh2_ed25519_new_private(libssh2_ed25519_ctx **ed_ctx,
+                                 LIBSSH2_SESSION *session,
+                                 const char *filename,
+                                 const uint8_t *passphrase);
+Reads an ED25519 private key from PEM file filename into a new ED25519 context.
+Must call _libssh2_init_if_needed().
+Return 0 if OK, else -1.
+This procedure is already prototyped in crypto.h.
+ +int _libssh2_ed25519_new_private_frommemory(libssh2_ed25519_ctx **ed_ctx, + LIBSSH2_SESSION *session, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase); +Builds an ED25519 private key from PEM data at filedata of length filedata_len +into a new ED25519 context stored at ed_ctx. +Must call _libssh2_init_if_needed(). +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ed25519_sign(libssh2_ed25519_ctx *ctx, LIBSSH2_SESSION *session, + uint8_t **out_sig, size_t *out_sig_len, + const uint8_t *message, size_t message_len); +ED25519 signs the (message, message_len) bytes and stores the allocated +signature at (sig, sig_len). +Signature buffer is allocated from the given session. +Returns 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_ed25519_verify(libssh2_ed25519_ctx *ctx, const uint8_t *s, + size_t s_len, const uint8_t *m, size_t m_len); +Verify (s, s_len) signature of (m, m_len) using the given ED25519 context. +Return 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +int _libssh2_curve25519_gen_k(_libssh2_bn **k, + uint8_t private_key[LIBSSH2_ED25519_KEY_LEN], + uint8_t srvr_public_key[LIBSSH2_ED25519_KEY_LEN]); +Computes a shared ED25519 secret key from the given raw server public key and +raw client public key and stores it as a big number in *k. Big number should +have been initialized before calling this function. +Returns 0 if OK, else -1. +This procedure is already prototyped in crypto.h. + +void _libssh2_ed25519_free(libssh2_ed25519_ctx *ed25519ctx); +Releases the ED25519 computation context at ed25519ctx. + + +8) Miscellaneous void libssh2_prepare_iovec(struct iovec *vector, unsigned int len); Prepare len consecutive iovec slots before using them. @@ -589,5 +923,17 @@ In example, this is needed to preset unused structure slacks on platforms requiring it. If this is not needed, it should be defined as an empty macro. 
-void _libssh2_random(unsigned char *buf, int len);
+int _libssh2_random(unsigned char *buf, int len);
 Store len random bytes at buf.
+Returns 0 if OK, else -1.
+
+const char * _libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session,
+                                                    unsigned char *key_method,
+                                                    size_t key_method_len);
+
+This function is for implementing key hash upgrading as defined in RFC 8332.
+
+Based on the incoming key_method value, this function will return a
+list of supported algorithms that can upgrade the original key method algorithm
+as a comma separated list. If there is no upgrade option, this function should
+return NULL.
diff --git a/vendor/libssh2/docs/INSTALL_AUTOTOOLS b/vendor/libssh2/docs/INSTALL_AUTOTOOLS
index d6eae59af6..a75b51814d 100644
--- a/vendor/libssh2/docs/INSTALL_AUTOTOOLS
+++ b/vendor/libssh2/docs/INSTALL_AUTOTOOLS
@@ -7,6 +7,22 @@ Software Foundation, Inc.
 This file is free documentation; the Free Software Foundation gives
 unlimited permission to copy, distribute and modify it.
 
+When Building directly from Master
+==================================
+
+If you want to build directly from the git repository, you must first
+generate the configure script and Makefile using autotools. There is
+a convenience script that calls all tools in the correct order.
Make +sure that autoconf, automake and libtool are installed on your system, +then execute: + + autoreconf -fi + +After executing this script, you can build the project as usual: + + ./configure + make + Basic Installation ================== diff --git a/vendor/libssh2/docs/INSTALL_CMAKE b/vendor/libssh2/docs/INSTALL_CMAKE.md similarity index 96% rename from vendor/libssh2/docs/INSTALL_CMAKE rename to vendor/libssh2/docs/INSTALL_CMAKE.md index e0b8515101..c136fdcee8 100644 --- a/vendor/libssh2/docs/INSTALL_CMAKE +++ b/vendor/libssh2/docs/INSTALL_CMAKE.md @@ -20,10 +20,12 @@ Getting started If you are happy with the default options, make a new build directory, change to it, configure the build environment and build the project: +``` mkdir bin cd bin cmake .. cmake --build . +``` libssh2 will be built as a static library and will use any cryptography library available. The library binary will be put in @@ -40,6 +42,11 @@ pass the options to CMake on the command line: The following options are available: + * `LINT=ON` + + Enables running the source code linter when building. Can be `ON` or `OFF`. + + * `BUILD_SHARED_LIBS=OFF` Determines whether libssh2 is built as a static library or as a @@ -119,20 +126,27 @@ Tests To test the build, run the appropriate test target for your build system. For example: +``` cmake --build . --target test +``` or +``` cmake --build . --target RUN_TESTS +``` How do I use libssh2 in my project if my project doesn't use CMake? ------------------------------------------------------------------- If you are not using CMake for your own project, install libssh2 - +``` cmake cmake --build . cmake --build . --target install +``` or +``` cmake --build . --target INSTALL +``` and then specify the install location to your project in the normal way for your build environment. 
If you don't like the default install @@ -176,4 +190,4 @@ builds your project: [1] https://www.cmake.org/cmake/resources/software.html [2] https://www.cmake.org/cmake/help/v3.0/manual/cmake-packages.7.html [3] https://www.cmake.org/cmake/help/v3.0/manual/cmake-packages.7.html#package-registry -[4] http://www.kitware.com/media/html/BuildingExternalProjectsWithCMake2.8.html +[4] https://blog.kitware.com/wp-content/uploads/2016/01/kitware_quarterly1009.pdf diff --git a/vendor/libssh2/docs/Makefile.am b/vendor/libssh2/docs/Makefile.am index 688d8d00a8..a69a16ca6f 100644 --- a/vendor/libssh2/docs/Makefile.am +++ b/vendor/libssh2/docs/Makefile.am @@ -1,15 +1,17 @@ # $Id: Makefile.am,v 1.37 2009/03/26 15:41:15 bagder Exp $ -EXTRA_DIST = template.3 BINDINGS INSTALL_AUTOTOOLS INSTALL_CMAKE HACKING TODO \ - AUTHORS CMakeLists.txt HACKING.CRYPTO SECURITY.md +EXTRA_DIST = template.3 BINDINGS INSTALL_AUTOTOOLS INSTALL_CMAKE.md HACKING TODO \ + AUTHORS CMakeLists.txt HACKING-CRYPTO SECURITY.md dist_man_MANS = \ libssh2_agent_connect.3 \ libssh2_agent_disconnect.3 \ libssh2_agent_free.3 \ libssh2_agent_get_identity.3 \ + libssh2_agent_get_identity_path.3 \ libssh2_agent_init.3 \ libssh2_agent_list_identities.3 \ + libssh2_agent_set_identity_path.3 \ libssh2_agent_userauth.3 \ libssh2_banner_set.3 \ libssh2_base64_decode.3 \ @@ -161,6 +163,7 @@ dist_man_MANS = \ libssh2_trace.3 \ libssh2_trace_sethandler.3 \ libssh2_userauth_authenticated.3 \ + libssh2_userauth_banner.3 \ libssh2_userauth_hostbased_fromfile.3 \ libssh2_userauth_hostbased_fromfile_ex.3 \ libssh2_userauth_keyboard_interactive.3 \ diff --git a/vendor/libssh2/docs/Makefile.in b/vendor/libssh2/docs/Makefile.in index 857a393158..40143ed18c 100644 --- a/vendor/libssh2/docs/Makefile.in +++ b/vendor/libssh2/docs/Makefile.in @@ -1,7 +1,7 @@ -# Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.16.4 from Makefile.am. 
# @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2021 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -101,8 +101,7 @@ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h \ - $(top_builddir)/example/libssh2_config.h +CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) @@ -173,6 +172,12 @@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ @@ -183,13 +188,14 @@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ +ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ -HAVE_LIBMBEDTLS = @HAVE_LIBMBEDTLS@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ HAVE_LIBSSL = @HAVE_LIBSSL@ HAVE_LIBZ = @HAVE_LIBZ@ INSTALL = @INSTALL@ @@ -205,8 +211,8 @@ LIBCRYPT32 = @LIBCRYPT32@ LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ LIBGCRYPT = @LIBGCRYPT@ LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ -LIBMBEDTLS = @LIBMBEDTLS@ -LIBMBEDTLS_PREFIX = @LIBMBEDTLS_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSREQUIRED = @LIBSREQUIRED@ @@ -216,12 +222,13 @@ LIBSSL_PREFIX = @LIBSSL_PREFIX@ LIBTOOL = @LIBTOOL@ LIBZ = @LIBZ@ LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = @LIB_FUZZING_ENGINE@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBBCRYPT = @LTLIBBCRYPT@ LTLIBCRYPT32 = @LTLIBCRYPT32@ LTLIBGCRYPT = @LTLIBGCRYPT@ 
-LTLIBMBEDTLS = @LTLIBMBEDTLS@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ LTLIBOBJS = @LTLIBOBJS@ LTLIBSSL = @LTLIBSSL@ LTLIBZ = @LTLIBZ@ @@ -257,6 +264,7 @@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ @@ -304,16 +312,18 @@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -EXTRA_DIST = template.3 BINDINGS INSTALL_AUTOTOOLS INSTALL_CMAKE HACKING TODO \ - AUTHORS CMakeLists.txt HACKING.CRYPTO SECURITY.md +EXTRA_DIST = template.3 BINDINGS INSTALL_AUTOTOOLS INSTALL_CMAKE.md HACKING TODO \ + AUTHORS CMakeLists.txt HACKING-CRYPTO SECURITY.md dist_man_MANS = \ libssh2_agent_connect.3 \ libssh2_agent_disconnect.3 \ libssh2_agent_free.3 \ libssh2_agent_get_identity.3 \ + libssh2_agent_get_identity_path.3 \ libssh2_agent_init.3 \ libssh2_agent_list_identities.3 \ + libssh2_agent_set_identity_path.3 \ libssh2_agent_userauth.3 \ libssh2_banner_set.3 \ libssh2_base64_decode.3 \ @@ -498,8 +508,8 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) @@ -565,8 +575,10 @@ ctags CTAGS: cscope cscopelist: +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am -distdir: $(DISTFILES) +distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 
's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ diff --git a/vendor/libssh2/docs/SECURITY.md b/vendor/libssh2/docs/SECURITY.md index 83cf65b7a2..6f442eb634 100644 --- a/vendor/libssh2/docs/SECURITY.md +++ b/vendor/libssh2/docs/SECURITY.md @@ -64,7 +64,7 @@ announcement. [distros@openwall](http://oss-security.openwall.org/wiki/mailing-lists/distros) when also informing and preparing them for the upcoming public security vulnerability announcement - attach the advisory draft for information. Note - that 'distros' won't accept an embargo longer than 19 days. + that 'distros' won't accept an embargo longer than 14 days. - Update the "security advisory" with the CVE number. @@ -96,5 +96,5 @@ libssh2 project and you have shown an understanding for the project and its way of working. You must've been around for a good while and you should have no plans in vanishing in the near future. -We do not make the list of partipants public mostly because it tends to vary +We do not make the list of participants public mostly because it tends to vary somewhat over time and a list somewhere will only risk getting outdated. diff --git a/vendor/libssh2/docs/libssh2_agent_get_identity_path.3 b/vendor/libssh2/docs/libssh2_agent_get_identity_path.3 new file mode 100644 index 0000000000..58d6dd569a --- /dev/null +++ b/vendor/libssh2/docs/libssh2_agent_get_identity_path.3 @@ -0,0 +1,22 @@ +.\" +.\" Copyright (c) 2019 by Will Cosgrove +.\" +.TH libssh2_agent_get_identity_path 3 "6 Mar 2019" "libssh2 1.9" "libssh2 manual" +.SH NAME +libssh2_agent_get_identity_path - gets the custom ssh-agent socket path +.SH SYNOPSIS +#include + +const char * +libssh2_agent_get_identity_path(LIBSSH2_AGENT *agent); +.SH DESCRIPTION +Returns the custom agent identity socket path if set using libssh2_agent_set_identity_path() + +.SH RETURN VALUE +Returns the socket path on disk. 
+.SH AVAILABILITY +Added in libssh2 1.9 +.SH SEE ALSO +.BR libssh2_agent_init(3) +.BR libssh2_agent_set_identity_path(3) + diff --git a/vendor/libssh2/docs/libssh2_agent_set_identity_path.3 b/vendor/libssh2/docs/libssh2_agent_set_identity_path.3 new file mode 100644 index 0000000000..73e1266d1e --- /dev/null +++ b/vendor/libssh2/docs/libssh2_agent_set_identity_path.3 @@ -0,0 +1,22 @@ +.\" +.\" Copyright (c) 2019 by Will Cosgrove +.\" +.TH libssh2_agent_set_identity_path 3 "6 Mar 2019" "libssh2 1.9" "libssh2 manual" +.SH NAME +libssh2_agent_set_identity_path - set an ssh-agent socket path on disk +.SH SYNOPSIS +#include + +void +libssh2_agent_set_identity_path(LIBSSH2_AGENT *agent, const char *path); +.SH DESCRIPTION +Allows a custom agent identity socket path instead of the default SSH_AUTH_SOCK env value + +.SH RETURN VALUE +Returns void +.SH AVAILABILITY +Added in libssh2 1.9 +.SH SEE ALSO +.BR libssh2_agent_init(3) +.BR libssh2_agent_get_identity_path(3) + diff --git a/vendor/libssh2/docs/libssh2_channel_wait_eof.3 b/vendor/libssh2/docs/libssh2_channel_wait_eof.3 index 47587e6b1b..8a3dc47570 100644 --- a/vendor/libssh2/docs/libssh2_channel_wait_eof.3 +++ b/vendor/libssh2/docs/libssh2_channel_wait_eof.3 @@ -8,7 +8,7 @@ int libssh2_channel_wait_eof(LIBSSH2_CHANNEL *channel); .SH DESCRIPTION -Wait for the remote end to acknowledge an EOF request. +Wait for the remote end to send EOF. .SH RETURN VALUE Return 0 on success or negative on failure. It returns diff --git a/vendor/libssh2/docs/libssh2_hostkey_hash.3 b/vendor/libssh2/docs/libssh2_hostkey_hash.3 index c2f164400d..d57fc0dd5f 100644 --- a/vendor/libssh2/docs/libssh2_hostkey_hash.3 +++ b/vendor/libssh2/docs/libssh2_hostkey_hash.3 @@ -11,12 +11,12 @@ libssh2_hostkey_hash(LIBSSH2_SESSION *session, int hash_type); \fIsession\fP - Session instance as returned by .BR libssh2_session_init_ex(3) -\fIhash_type\fP - One of: \fBLIBSSH2_HOSTKEY_HASH_MD5\fP or -\fBLIBSSH2_HOSTKEY_HASH_SHA1\fP. 
+\fIhash_type\fP - One of: \fBLIBSSH2_HOSTKEY_HASH_MD5\fP, +\fBLIBSSH2_HOSTKEY_HASH_SHA1\fP or \fBLIBSSH2_HOSTKEY_HASH_SHA256\fP. Returns the computed digest of the remote system's hostkey. The length of the returned string is hash_type specific (e.g. 16 bytes for MD5, -20 bytes for SHA1). +20 bytes for SHA1, 32 bytes for SHA256). .SH RETURN VALUE Computed hostkey hash value, or NULL if the information is not available (either the session has not yet been started up, or the requested hash diff --git a/vendor/libssh2/docs/libssh2_session_callback_set.3 b/vendor/libssh2/docs/libssh2_session_callback_set.3 index 3901f88406..6a075cbfb8 100644 --- a/vendor/libssh2/docs/libssh2_session_callback_set.3 +++ b/vendor/libssh2/docs/libssh2_session_callback_set.3 @@ -32,11 +32,43 @@ function returns 0, the packet will be accepted nonetheless. .IP LIBSSH2_CALLBACK_X11 Called when an X11 connection has been accepted .IP LIBSSH2_CALLBACK_SEND -Called when libssh2 wants to send some data on the connection. -Can be set to a custom function to handle I/O your own way. +Called when libssh2 wants to send data on the connection. Can be set to a +custom function to handle I/O your own way. + +The prototype of the callback: + +.nf +ssize_t sendcb(libssh2_socket_t sockfd, const void *buffer, + size_t length, int flags, void **abstract); +.fi + +\fBsockfd\fP is the socket to write to, \fBbuffer\fP points to the data to +send, \fBlength\fP is the size of the data, \fBflags\fP is the flags that +would've been used to a \fIsend()\fP call and \fBabstract\fP is a pointer to +the abstract pointer set in the \fIlibssh2_session_init_ex(3)\fP call. + +The callback returns the number of bytes sent, or -1 for error. The special +return code \fB-EAGAIN\fP can be returned to signal that the send was aborted +to prevent getting blocked and it needs to be called again. .IP LIBSSH2_CALLBACK_RECV -Called when libssh2 wants to receive some data from the connection. 
-Can be set to a custom function to handle I/O your own way. +Called when libssh2 wants to read data from the connection. Can be set to a +custom function to handle I/O your own way. + +The prototype of the callback: + +.nf +ssize_t recvcb(libssh2_socket_t sockfd, void *buffer, + size_t length, int flags, void **abstract); +.fi + +\fBsockfd\fP is the socket to read from, \fBbuffer\fP where to store received +data into, \fBlength\fP is the size of the buffer, \fBflags\fP is the flags +that would've been used to a \fIrecv()\fP call and \fBabstract\fP is a pointer +to the abstract pointer set in the \fIlibssh2_session_init_ex(3)\fP call. + +The callback returns the number of bytes read, or -1 for error. The special +return code \fB-EAGAIN\fP can be returned to signal that the read was aborted +to prevent getting blocked and it needs to be called again. .SH RETURN VALUE Pointer to previous callback handler. Returns NULL if no prior callback handler was set or the callback type was unknown. diff --git a/vendor/libssh2/docs/libssh2_session_methods.3 b/vendor/libssh2/docs/libssh2_session_methods.3 index cc4f6d49f4..0e7f79fa96 100644 --- a/vendor/libssh2/docs/libssh2_session_methods.3 +++ b/vendor/libssh2/docs/libssh2_session_methods.3 @@ -1,4 +1,4 @@ -.TH libssh2_session_methods 3 "1 Jun 2007" "libssh2 0.15" "libssh2 manual" +.TH libssh2_session_methods 3 "8 Nov 2021" "libssh2 1.11" "libssh2 manual" .SH NAME libssh2_session_methods - return the currently active algorithms .SH SYNOPSIS @@ -8,13 +8,14 @@ const char * libssh2_session_methods(LIBSSH2_SESSION *session, int method_type); .SH DESCRIPTION -\fIsession\fP - Session instance as returned by +\fIsession\fP - Session instance as returned by .BR libssh2_session_init_ex(3) \fImethod_type\fP - one of the method type constants: LIBSSH2_METHOD_KEX, LIBSSH2_METHOD_HOSTKEY, LIBSSH2_METHOD_CRYPT_CS, LIBSSH2_METHOD_CRYPT_SC, LIBSSH2_METHOD_MAC_CS, LIBSSH2_METHOD_MAC_SC, LIBSSH2_METHOD_COMP_CS, -LIBSSH2_METHOD_COMP_SC, 
LIBSSH2_METHOD_LANG_CS, LIBSSH2_METHOD_LANG_SC. +LIBSSH2_METHOD_COMP_SC, LIBSSH2_METHOD_LANG_CS, LIBSSH2_METHOD_LANG_SC, +LIBSSH2_METHOD_SIGN_ALGO. Returns the actual method negotiated for a particular transport parameter. .SH RETURN VALUE diff --git a/vendor/libssh2/docs/libssh2_session_supported_algs.3 b/vendor/libssh2/docs/libssh2_session_supported_algs.3 index e8568f2e84..6e414a90c2 100644 --- a/vendor/libssh2/docs/libssh2_session_supported_algs.3 +++ b/vendor/libssh2/docs/libssh2_session_supported_algs.3 @@ -10,9 +10,9 @@ int libssh2_session_supported_algs(LIBSSH2_SESSION* session, const char*** algs); .SH DESCRIPTION \fIsession\fP - An instance of initialized LIBSSH2_SESSION (the function will -use its pointer to the memory allocation function). \fImethod_type\fP - Method -type. See .BR \fIlibssh2_session_method_pref(3)\fP. \fIalgs\fP - Address of a -pointer that will point to an array of returned algorithms +use its pointer to the memory allocation function). \fImethod_type\fP - +Method type. See \fIlibssh2_session_method_pref(3)\fP. \fIalgs\fP - Address +of a pointer that will point to an array of returned algorithms Get a list of supported algorithms for the given \fImethod_type\fP. The method_type parameter is equivalent to method_type in @@ -44,9 +44,9 @@ rc = libssh2_session_supported_algs(session, if (rc>0) { /* the call succeeded, do sth. with the list of algorithms (e.g. list them)... */ - printf("Supported symmetric algorithms:\n"); + printf("Supported symmetric algorithms:\\n"); for ( i=0; i + +int +libssh2_userauth_banner(LIBSSH2_SESSION *session, char **banner); +.SH DESCRIPTION +\fIsession\fP - Session instance as returned by +.BR libssh2_session_init_ex(3) + +\fIbanner\fP - Should point to a pointer that gets filled with banner message. + +After an authentication has been attempted, such as a +\fBSSH_USERAUTH_NONE\fP request sent by +.BR libssh2_userauth_list(3) , +this function can be called to retrieve the userauth banner sent by +the server. 
If no such banner is sent, or if an authentication has not +yet been attempted, returns LIBSSH2_ERROR_MISSING_USERAUTH_BANNER. +.SH RETURN VALUE +On success returns 0 and an UTF-8 NUL-terminated string is stored in the +\fIbanner\fP. This string is internally managed by libssh2 and will be +deallocated upon session termination. +On failure returns +LIBSSH2_ERROR_MISSING_USERAUTH_BANNER. +.SH SEE ALSO +.BR libssh2_session_init_ex(3), +.BR libssh2_userauth_list(3) diff --git a/vendor/libssh2/example/CMakeLists.txt b/vendor/libssh2/example/CMakeLists.txt index f77033f75c..3dc115ef19 100644 --- a/vendor/libssh2/example/CMakeLists.txt +++ b/vendor/libssh2/example/CMakeLists.txt @@ -57,6 +57,7 @@ set(EXAMPLES sftpdir_nonblock ssh2_exec ssh2_agent + ssh2_agent_forwarding ssh2_echo sftp_append subsystem_netconf diff --git a/vendor/libssh2/example/Makefile.am b/vendor/libssh2/example/Makefile.am index 5cf5f0714f..ec542cd15f 100644 --- a/vendor/libssh2/example/Makefile.am +++ b/vendor/libssh2/example/Makefile.am @@ -6,12 +6,12 @@ EXTRA_DIST = libssh2_config.h.in libssh2_config_cmake.h.in CMakeLists.txt noinst_PROGRAMS = direct_tcpip ssh2 scp scp_nonblock scp_write \ scp_write_nonblock sftp sftp_nonblock sftp_write sftp_write_nonblock \ sftp_mkdir sftp_mkdir_nonblock sftp_RW_nonblock sftp_write_sliding \ - sftpdir sftpdir_nonblock ssh2_exec ssh2_agent ssh2_echo sftp_append \ - subsystem_netconf tcpip-forward + sftpdir sftpdir_nonblock ssh2_exec ssh2_agent ssh2_agent_forwarding \ + ssh2_echo sftp_append subsystem_netconf tcpip-forward if HAVE_SYS_UN_H noinst_PROGRAMS += x11 endif -AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/example +AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/example -I../src LDADD = $(top_builddir)/src/libssh2.la diff --git a/vendor/libssh2/example/Makefile.in b/vendor/libssh2/example/Makefile.in index 87f9f12865..ff86a2e41e 100644 --- a/vendor/libssh2/example/Makefile.in +++ b/vendor/libssh2/example/Makefile.in @@ -1,7 +1,7 @@ -# 
Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.16.4 from Makefile.am. # @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2021 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -96,7 +96,8 @@ noinst_PROGRAMS = direct_tcpip$(EXEEXT) ssh2$(EXEEXT) scp$(EXEEXT) \ sftp_mkdir_nonblock$(EXEEXT) sftp_RW_nonblock$(EXEEXT) \ sftp_write_sliding$(EXEEXT) sftpdir$(EXEEXT) \ sftpdir_nonblock$(EXEEXT) ssh2_exec$(EXEEXT) \ - ssh2_agent$(EXEEXT) ssh2_echo$(EXEEXT) sftp_append$(EXEEXT) \ + ssh2_agent$(EXEEXT) ssh2_agent_forwarding$(EXEEXT) \ + ssh2_echo$(EXEEXT) sftp_append$(EXEEXT) \ subsystem_netconf$(EXEEXT) tcpip-forward$(EXEEXT) \ $(am__EXEEXT_1) @HAVE_SYS_UN_H_TRUE@am__append_1 = x11 @@ -112,7 +113,7 @@ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h libssh2_config.h +CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = @HAVE_SYS_UN_H_TRUE@am__EXEEXT_1 = x11$(EXEEXT) @@ -193,6 +194,10 @@ ssh2_agent_SOURCES = ssh2_agent.c ssh2_agent_OBJECTS = ssh2_agent.$(OBJEXT) ssh2_agent_LDADD = $(LDADD) ssh2_agent_DEPENDENCIES = $(top_builddir)/src/libssh2.la +ssh2_agent_forwarding_SOURCES = ssh2_agent_forwarding.c +ssh2_agent_forwarding_OBJECTS = ssh2_agent_forwarding.$(OBJEXT) +ssh2_agent_forwarding_LDADD = $(LDADD) +ssh2_agent_forwarding_DEPENDENCIES = $(top_builddir)/src/libssh2.la ssh2_echo_SOURCES = ssh2_echo.c ssh2_echo_OBJECTS = ssh2_echo.$(OBJEXT) ssh2_echo_LDADD = $(LDADD) @@ -227,7 +232,20 @@ am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles +am__maybe_remake_depfiles = depfiles 
+am__depfiles_remade = ./$(DEPDIR)/direct_tcpip.Po ./$(DEPDIR)/scp.Po \ + ./$(DEPDIR)/scp_nonblock.Po ./$(DEPDIR)/scp_write.Po \ + ./$(DEPDIR)/scp_write_nonblock.Po ./$(DEPDIR)/sftp.Po \ + ./$(DEPDIR)/sftp_RW_nonblock.Po ./$(DEPDIR)/sftp_append.Po \ + ./$(DEPDIR)/sftp_mkdir.Po ./$(DEPDIR)/sftp_mkdir_nonblock.Po \ + ./$(DEPDIR)/sftp_nonblock.Po ./$(DEPDIR)/sftp_write.Po \ + ./$(DEPDIR)/sftp_write_nonblock.Po \ + ./$(DEPDIR)/sftp_write_sliding.Po ./$(DEPDIR)/sftpdir.Po \ + ./$(DEPDIR)/sftpdir_nonblock.Po ./$(DEPDIR)/ssh2.Po \ + ./$(DEPDIR)/ssh2_agent.Po ./$(DEPDIR)/ssh2_agent_forwarding.Po \ + ./$(DEPDIR)/ssh2_echo.Po ./$(DEPDIR)/ssh2_exec.Po \ + ./$(DEPDIR)/subsystem_netconf.Po ./$(DEPDIR)/tcpip-forward.Po \ + ./$(DEPDIR)/x11.Po am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) @@ -251,21 +269,22 @@ SOURCES = direct_tcpip.c scp.c scp_nonblock.c scp_write.c \ scp_write_nonblock.c sftp.c sftp_RW_nonblock.c sftp_append.c \ sftp_mkdir.c sftp_mkdir_nonblock.c sftp_nonblock.c \ sftp_write.c sftp_write_nonblock.c sftp_write_sliding.c \ - sftpdir.c sftpdir_nonblock.c ssh2.c ssh2_agent.c ssh2_echo.c \ - ssh2_exec.c subsystem_netconf.c tcpip-forward.c x11.c + sftpdir.c sftpdir_nonblock.c ssh2.c ssh2_agent.c \ + ssh2_agent_forwarding.c ssh2_echo.c ssh2_exec.c \ + subsystem_netconf.c tcpip-forward.c x11.c DIST_SOURCES = direct_tcpip.c scp.c scp_nonblock.c scp_write.c \ scp_write_nonblock.c sftp.c sftp_RW_nonblock.c sftp_append.c \ sftp_mkdir.c sftp_mkdir_nonblock.c sftp_nonblock.c \ sftp_write.c sftp_write_nonblock.c sftp_write_sliding.c \ - sftpdir.c sftpdir_nonblock.c ssh2.c ssh2_agent.c ssh2_echo.c \ - ssh2_exec.c subsystem_netconf.c tcpip-forward.c x11.c + sftpdir.c sftpdir_nonblock.c ssh2.c ssh2_agent.c \ + ssh2_agent_forwarding.c ssh2_echo.c ssh2_exec.c \ + subsystem_netconf.c tcpip-forward.c x11.c am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info 
--version) >/dev/null 2>&1;; \ esac -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ - $(LISP)libssh2_config.h.in +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. @@ -282,10 +301,7 @@ am__define_uniq_tagged_files = \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags -am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/libssh2_config.h.in \ - $(top_srcdir)/depcomp +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ @@ -302,6 +318,12 @@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ @@ -312,13 +334,14 @@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ +ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ -HAVE_LIBMBEDTLS = @HAVE_LIBMBEDTLS@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ HAVE_LIBSSL = @HAVE_LIBSSL@ HAVE_LIBZ = @HAVE_LIBZ@ INSTALL = @INSTALL@ @@ -334,8 +357,8 @@ LIBCRYPT32 = @LIBCRYPT32@ LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ LIBGCRYPT = @LIBGCRYPT@ LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ -LIBMBEDTLS = @LIBMBEDTLS@ -LIBMBEDTLS_PREFIX = @LIBMBEDTLS_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSREQUIRED = @LIBSREQUIRED@ @@ -345,12 +368,13 @@ LIBSSL_PREFIX = @LIBSSL_PREFIX@ LIBTOOL = @LIBTOOL@ LIBZ = @LIBZ@ LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = 
@LIB_FUZZING_ENGINE@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBBCRYPT = @LTLIBBCRYPT@ LTLIBCRYPT32 = @LTLIBCRYPT32@ LTLIBGCRYPT = @LTLIBGCRYPT@ -LTLIBMBEDTLS = @LTLIBMBEDTLS@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ LTLIBOBJS = @LTLIBOBJS@ LTLIBSSL = @LTLIBSSL@ LTLIBZ = @LTLIBZ@ @@ -386,6 +410,7 @@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ @@ -435,10 +460,9 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign nostdinc EXTRA_DIST = libssh2_config.h.in libssh2_config_cmake.h.in CMakeLists.txt -AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/example +AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/example -I../src LDADD = $(top_builddir)/src/libssh2.la -all: libssh2_config.h - $(MAKE) $(AM_MAKEFLAGS) all-am +all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj @@ -459,8 +483,8 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) @@ -472,17 +496,6 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): -libssh2_config.h: stamp-h2 - @test -f $@ || rm -f stamp-h2 - @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h2 - -stamp-h2: $(srcdir)/libssh2_config.h.in $(top_builddir)/config.status - @rm -f stamp-h2 
- cd $(top_builddir) && $(SHELL) ./config.status example/libssh2_config.h - -distclean-hdr: - -rm -f libssh2_config.h stamp-h2 - clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ @@ -564,6 +577,10 @@ ssh2_agent$(EXEEXT): $(ssh2_agent_OBJECTS) $(ssh2_agent_DEPENDENCIES) $(EXTRA_ss @rm -f ssh2_agent$(EXEEXT) $(AM_V_CCLD)$(LINK) $(ssh2_agent_OBJECTS) $(ssh2_agent_LDADD) $(LIBS) +ssh2_agent_forwarding$(EXEEXT): $(ssh2_agent_forwarding_OBJECTS) $(ssh2_agent_forwarding_DEPENDENCIES) $(EXTRA_ssh2_agent_forwarding_DEPENDENCIES) + @rm -f ssh2_agent_forwarding$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(ssh2_agent_forwarding_OBJECTS) $(ssh2_agent_forwarding_LDADD) $(LIBS) + ssh2_echo$(EXEEXT): $(ssh2_echo_OBJECTS) $(ssh2_echo_DEPENDENCIES) $(EXTRA_ssh2_echo_DEPENDENCIES) @rm -f ssh2_echo$(EXEEXT) $(AM_V_CCLD)$(LINK) $(ssh2_echo_OBJECTS) $(ssh2_echo_LDADD) $(LIBS) @@ -590,29 +607,36 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/direct_tcpip.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_write.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_write_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_RW_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_append.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_mkdir.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_mkdir_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_write.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_write_nonblock.Po@am__quote@ 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_write_sliding.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftpdir.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftpdir_nonblock.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_agent.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_echo.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_exec.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/subsystem_netconf.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tcpip-forward.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/x11.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/direct_tcpip.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_write.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp_write_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_RW_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_append.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_mkdir.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_mkdir_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_write.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/sftp_write_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp_write_sliding.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftpdir.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftpdir_nonblock.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_agent.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_agent_forwarding.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_echo.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2_exec.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/subsystem_netconf.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tcpip-forward.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/x11.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -692,8 +716,10 @@ cscopelist-am: $(am__tagged_files) distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am -distdir: $(DISTFILES) +distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ @@ -725,7 +751,7 @@ distdir: $(DISTFILES) done check-am: all-am check: check-am -all-am: Makefile $(PROGRAMS) libssh2_config.h +all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am @@ -763,10 
+789,33 @@ clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am - -rm -rf ./$(DEPDIR) + -rm -f ./$(DEPDIR)/direct_tcpip.Po + -rm -f ./$(DEPDIR)/scp.Po + -rm -f ./$(DEPDIR)/scp_nonblock.Po + -rm -f ./$(DEPDIR)/scp_write.Po + -rm -f ./$(DEPDIR)/scp_write_nonblock.Po + -rm -f ./$(DEPDIR)/sftp.Po + -rm -f ./$(DEPDIR)/sftp_RW_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_append.Po + -rm -f ./$(DEPDIR)/sftp_mkdir.Po + -rm -f ./$(DEPDIR)/sftp_mkdir_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_write.Po + -rm -f ./$(DEPDIR)/sftp_write_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_write_sliding.Po + -rm -f ./$(DEPDIR)/sftpdir.Po + -rm -f ./$(DEPDIR)/sftpdir_nonblock.Po + -rm -f ./$(DEPDIR)/ssh2.Po + -rm -f ./$(DEPDIR)/ssh2_agent.Po + -rm -f ./$(DEPDIR)/ssh2_agent_forwarding.Po + -rm -f ./$(DEPDIR)/ssh2_echo.Po + -rm -f ./$(DEPDIR)/ssh2_exec.Po + -rm -f ./$(DEPDIR)/subsystem_netconf.Po + -rm -f ./$(DEPDIR)/tcpip-forward.Po + -rm -f ./$(DEPDIR)/x11.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-tags + distclean-tags dvi: dvi-am @@ -809,7 +858,30 @@ install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) + -rm -f ./$(DEPDIR)/direct_tcpip.Po + -rm -f ./$(DEPDIR)/scp.Po + -rm -f ./$(DEPDIR)/scp_nonblock.Po + -rm -f ./$(DEPDIR)/scp_write.Po + -rm -f ./$(DEPDIR)/scp_write_nonblock.Po + -rm -f ./$(DEPDIR)/sftp.Po + -rm -f ./$(DEPDIR)/sftp_RW_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_append.Po + -rm -f ./$(DEPDIR)/sftp_mkdir.Po + -rm -f ./$(DEPDIR)/sftp_mkdir_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_write.Po + -rm -f ./$(DEPDIR)/sftp_write_nonblock.Po + -rm -f ./$(DEPDIR)/sftp_write_sliding.Po + -rm -f ./$(DEPDIR)/sftpdir.Po + -rm -f ./$(DEPDIR)/sftpdir_nonblock.Po + -rm -f ./$(DEPDIR)/ssh2.Po + -rm -f ./$(DEPDIR)/ssh2_agent.Po + -rm -f ./$(DEPDIR)/ssh2_agent_forwarding.Po + -rm -f 
./$(DEPDIR)/ssh2_echo.Po + -rm -f ./$(DEPDIR)/ssh2_exec.Po + -rm -f ./$(DEPDIR)/subsystem_netconf.Po + -rm -f ./$(DEPDIR)/tcpip-forward.Po + -rm -f ./$(DEPDIR)/x11.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic @@ -828,18 +900,18 @@ ps-am: uninstall-am: -.MAKE: all install-am install-strip - -.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ - clean-libtool clean-noinstPROGRAMS cscopelist-am ctags \ - ctags-am distclean distclean-compile distclean-generic \ - distclean-hdr distclean-libtool distclean-tags distdir dvi \ - dvi-am html html-am info info-am install install-am \ - install-data install-data-am install-dvi install-dvi-am \ - install-exec install-exec-am install-html install-html-am \ - install-info install-info-am install-man install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-generic clean-libtool clean-noinstPROGRAMS cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am diff --git a/vendor/libssh2/example/direct_tcpip.c b/vendor/libssh2/example/direct_tcpip.c index fc3b5bfa74..4e1d90f489 100644 --- a/vendor/libssh2/example/direct_tcpip.c +++ b/vendor/libssh2/example/direct_tcpip.c @@ -72,8 +72,8 @@ int main(int argc, char 
*argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } @@ -82,49 +82,50 @@ int main(int argc, char *argv[]) int listensock = -1, forwardsock = -1; #endif - if (argc > 1) + if(argc > 1) server_ip = argv[1]; - if (argc > 2) + if(argc > 2) username = argv[2]; - if (argc > 3) + if(argc > 3) password = argv[3]; - if (argc > 4) + if(argc > 4) local_listenip = argv[4]; - if (argc > 5) + if(argc > 5) local_listenport = atoi(argv[5]); - if (argc > 6) + if(argc > 6) remote_desthost = argv[6]; - if (argc > 7) + if(argc > 7) remote_destport = atoi(argv[7]); - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } /* Connect to SSH server */ sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); #ifdef WIN32 - if (sock == INVALID_SOCKET) { + if(sock == INVALID_SOCKET) { fprintf(stderr, "failed to open socket!\n"); return -1; } #else - if (sock == -1) { + if(sock == -1) { perror("socket"); return -1; } #endif sin.sin_family = AF_INET; - if (INADDR_NONE == (sin.sin_addr.s_addr = inet_addr(server_ip))) { + sin.sin_addr.s_addr = inet_addr(server_ip); + if(INADDR_NONE == sin.sin_addr.s_addr) { perror("inet_addr"); return -1; } sin.sin_port = htons(22); - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -159,44 +160,46 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password")) + 
if(strstr(userauthlist, "password")) auth |= AUTH_PASSWORD; - if (strstr(userauthlist, "publickey")) + if(strstr(userauthlist, "publickey")) auth |= AUTH_PUBLICKEY; /* check for options */ if(argc > 8) { - if ((auth & AUTH_PASSWORD) && !strcasecmp(argv[8], "-p")) + if((auth & AUTH_PASSWORD) && !strcasecmp(argv[8], "-p")) auth = AUTH_PASSWORD; - if ((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[8], "-k")) + if((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[8], "-k")) auth = AUTH_PUBLICKEY; } - if (auth & AUTH_PASSWORD) { - if (libssh2_userauth_password(session, username, password)) { + if(auth & AUTH_PASSWORD) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else if (auth & AUTH_PUBLICKEY) { - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, - keyfile2, password)) { + } + else if(auth & AUTH_PUBLICKEY) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, + keyfile2, password)) { fprintf(stderr, "\tAuthentication by public key failed!\n"); goto shutdown; } fprintf(stderr, "\tAuthentication by public key succeeded.\n"); - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } listensock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); #ifdef WIN32 - if (listensock == INVALID_SOCKET) { + if(listensock == INVALID_SOCKET) { fprintf(stderr, "failed to open listen socket!\n"); return -1; } #else - if (listensock == -1) { + if(listensock == -1) { perror("socket"); return -1; } @@ -204,18 +207,20 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(local_listenport); - if (INADDR_NONE == (sin.sin_addr.s_addr = inet_addr(local_listenip))) { + sin.sin_addr.s_addr = inet_addr(local_listenip); + if(INADDR_NONE == sin.sin_addr.s_addr) { perror("inet_addr"); goto shutdown; } sockopt = 1; - setsockopt(listensock, SOL_SOCKET, SO_REUSEADDR, &sockopt, sizeof(sockopt)); - 
sinlen=sizeof(sin); - if (-1 == bind(listensock, (struct sockaddr *)&sin, sinlen)) { + setsockopt(listensock, SOL_SOCKET, SO_REUSEADDR, &sockopt, + sizeof(sockopt)); + sinlen = sizeof(sin); + if(-1 == bind(listensock, (struct sockaddr *)&sin, sinlen)) { perror("bind"); goto shutdown; } - if (-1 == listen(listensock, 2)) { + if(-1 == listen(listensock, 2)) { perror("listen"); goto shutdown; } @@ -225,12 +230,12 @@ int main(int argc, char *argv[]) forwardsock = accept(listensock, (struct sockaddr *)&sin, &sinlen); #ifdef WIN32 - if (forwardsock == INVALID_SOCKET) { + if(forwardsock == INVALID_SOCKET) { fprintf(stderr, "failed to accept forward socket!\n"); goto shutdown; } #else - if (forwardsock == -1) { + if(forwardsock == -1) { perror("accept"); goto shutdown; } @@ -244,7 +249,7 @@ int main(int argc, char *argv[]) channel = libssh2_channel_direct_tcpip_ex(session, remote_desthost, remote_destport, shost, sport); - if (!channel) { + if(!channel) { fprintf(stderr, "Could not open the direct-tcpip channel!\n" "(Note that this can be a problem at the server!" 
" Please review the server logs.)\n"); @@ -254,22 +259,23 @@ int main(int argc, char *argv[]) /* Must use non-blocking IO hereafter due to the current libssh2 API */ libssh2_session_set_blocking(session, 0); - while (1) { + while(1) { FD_ZERO(&fds); FD_SET(forwardsock, &fds); tv.tv_sec = 0; tv.tv_usec = 100000; rc = select(forwardsock + 1, &fds, NULL, NULL, &tv); - if (-1 == rc) { + if(-1 == rc) { perror("select"); goto shutdown; } - if (rc && FD_ISSET(forwardsock, &fds)) { + if(rc && FD_ISSET(forwardsock, &fds)) { len = recv(forwardsock, buf, sizeof(buf), 0); - if (len < 0) { + if(len < 0) { perror("read"); goto shutdown; - } else if (0 == len) { + } + else if(0 == len) { fprintf(stderr, "The client at %s:%d disconnected!\n", shost, sport); goto shutdown; @@ -277,34 +283,34 @@ int main(int argc, char *argv[]) wr = 0; while(wr < len) { i = libssh2_channel_write(channel, buf + wr, len - wr); - if (LIBSSH2_ERROR_EAGAIN == i) { + if(LIBSSH2_ERROR_EAGAIN == i) { continue; } - if (i < 0) { + if(i < 0) { fprintf(stderr, "libssh2_channel_write: %d\n", i); goto shutdown; } wr += i; } } - while (1) { + while(1) { len = libssh2_channel_read(channel, buf, sizeof(buf)); - if (LIBSSH2_ERROR_EAGAIN == len) + if(LIBSSH2_ERROR_EAGAIN == len) break; - else if (len < 0) { + else if(len < 0) { fprintf(stderr, "libssh2_channel_read: %d", (int)len); goto shutdown; } wr = 0; - while (wr < len) { + while(wr < len) { i = send(forwardsock, buf + wr, len - wr, 0); - if (i <= 0) { + if(i <= 0) { perror("write"); goto shutdown; } wr += i; } - if (libssh2_channel_eof(channel)) { + if(libssh2_channel_eof(channel)) { fprintf(stderr, "The server at %s:%d disconnected!\n", remote_desthost, remote_destport); goto shutdown; @@ -320,7 +326,7 @@ int main(int argc, char *argv[]) close(forwardsock); close(listensock); #endif - if (channel) + if(channel) libssh2_channel_free(channel); libssh2_session_disconnect(session, "Client disconnecting normally"); libssh2_session_free(session); diff --git 
a/vendor/libssh2/example/libssh2_config.h.in b/vendor/libssh2/example/libssh2_config.h.in index af4ab9ca0a..307c625533 100644 --- a/vendor/libssh2/example/libssh2_config.h.in +++ b/vendor/libssh2/example/libssh2_config.h.in @@ -64,8 +64,8 @@ /* Define if you have the gcrypt library. */ #undef HAVE_LIBGCRYPT -/* Define if you have the mbedtls library. */ -#undef HAVE_LIBMBEDTLS +/* Define if you have the mbedcrypto library. */ +#undef HAVE_LIBMBEDCRYPTO /* Define if you have the ssl library. */ #undef HAVE_LIBSSL @@ -79,6 +79,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H +/* Define to 1 if you have the `memset_s' function. */ +#undef HAVE_MEMSET_S + /* Define to 1 if you have the header file. */ #undef HAVE_NETINET_IN_H @@ -178,10 +181,10 @@ /* Use mbedtls */ #undef LIBSSH2_MBEDTLS -/* Use OpenSSL */ +/* Use openssl */ #undef LIBSSH2_OPENSSL -/* Use Windows CNG */ +/* Use wincng */ #undef LIBSSH2_WINCNG /* Define to the sub-directory where libtool stores uninstalled libraries. 
*/ diff --git a/vendor/libssh2/example/scp.c b/vendor/libssh2/example/scp.c index e8e4217d77..9ad1e7d407 100644 --- a/vendor/libssh2/example/scp.c +++ b/vendor/libssh2/example/scp.c @@ -38,9 +38,9 @@ int main(int argc, char *argv[]) const char *fingerprint; LIBSSH2_SESSION *session; LIBSSH2_CHANNEL *channel; - const char *username="username"; - const char *password="password"; - const char *scppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *scppath = "/tmp/TEST"; libssh2_struct_stat fileinfo; int rc; libssh2_struct_stat_size got = 0; @@ -49,31 +49,32 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { scppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -86,8 +87,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -119,18 +120,20 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + 
if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) { +#define HOME_DIR "/home/username/" + if(libssh2_userauth_publickey_fromfile(session, username, + HOME_DIR ".ssh/id_rsa.pub", + HOME_DIR ".ssh/id_rsa", + password)) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -139,7 +142,7 @@ int main(int argc, char *argv[]) /* Request a file via SCP */ channel = libssh2_scp_recv2(session, scppath, &fileinfo); - if (!channel) { + if(!channel) { fprintf(stderr, "Unable to open a session: %d\n", libssh2_session_last_errno(session)); goto shutdown; @@ -148,7 +151,7 @@ int main(int argc, char *argv[]) while(got < fileinfo.st_size) { char mem[1024]; - int amount=sizeof(mem); + int amount = sizeof(mem); if((fileinfo.st_size -got) < amount) { amount = (int)(fileinfo.st_size -got); @@ -170,7 +173,8 @@ int main(int argc, char *argv[]) shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, + "Normal Shutdown, Thank you for playing"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/scp_nonblock.c b/vendor/libssh2/example/scp_nonblock.c index 45f66b83a6..bc5bdb3dc1 100644 --- a/vendor/libssh2/example/scp_nonblock.c +++ b/vendor/libssh2/example/scp_nonblock.c @@ -85,9 +85,9 @@ int main(int argc, char *argv[]) const char *fingerprint; LIBSSH2_SESSION *session; LIBSSH2_CHANNEL *channel; - const char *username="username"; - const char *password="password"; - const char *scppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *scppath = "/tmp/TEST"; libssh2_struct_stat fileinfo; #ifdef HAVE_GETTIMEOFDAY struct timeval start; @@ -103,31 
+103,32 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { scppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -140,14 +141,14 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in))) { fprintf(stderr, "failed to connect!\n"); return -1; } /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) return -1; /* Since we have set non-blocking, tell libssh2 we are non-blocking */ @@ -160,9 +161,9 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_session_handshake(session, sock)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -179,24 +180,25 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_password(session, username, password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, - "/home/username/" - ".ssh/id_rsa.pub", - "/home/username/" - ".ssh/id_rsa", - password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_publickey_fromfile(session, username, + "/home/username/" + ".ssh/id_rsa.pub", + "/home/username/" + ".ssh/id_rsa", + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -211,7 +213,7 @@ int main(int argc, char *argv[]) do { channel = libssh2_scp_recv2(session, scppath, &fileinfo); - if (!channel) { + if(!channel) { if(libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { char *err_msg; @@ -224,7 +226,7 @@ int main(int argc, char *argv[]) waitsocket(sock, session); } } - } while (!channel); + } while(!channel); fprintf(stderr, "libssh2_scp_recv() is done, now receive data!\n"); while(got < fileinfo.st_size) { @@ -232,22 +234,22 @@ int main(int argc, char *argv[]) int rc; do { - int amount=sizeof(mem); + int amount = sizeof(mem); - if ((fileinfo.st_size -got) < amount) { + if((fileinfo.st_size -got) < 
amount) { amount = (int)(fileinfo.st_size - got); } /* loop until we block */ rc = libssh2_channel_read(channel, mem, amount); - if (rc > 0) { + if(rc > 0) { write(1, mem, rc); got += rc; total += rc; } - } while (rc > 0); + } while(rc > 0); - if ((rc == LIBSSH2_ERROR_EAGAIN) && (got < fileinfo.st_size)) { + if((rc == LIBSSH2_ERROR_EAGAIN) && (got < fileinfo.st_size)) { /* this is due to blocking that would occur otherwise so we loop on this condition */ @@ -262,10 +264,11 @@ int main(int argc, char *argv[]) gettimeofday(&end, NULL); time_ms = tvdiff(end, start); - fprintf(stderr, "Got " LIBSSH2_STRUCT_STAT_SIZE_FORMAT " bytes in %ld ms = %.1f bytes/sec spin: %d\n", total, - time_ms, total/(time_ms/1000.0), spin); + fprintf(stderr, "Got %ld bytes in %ld ms = %.1f bytes/sec spin: %d\n", + (long)total, + time_ms, total/(time_ms/1000.0), spin); #else - fprintf(stderr, "Got " LIBSSH2_STRUCT_STAT_SIZE_FORMAT " bytes spin: %d\n", total, spin); + fprintf(stderr, "Got %ld bytes spin: %d\n", (long)total, spin); #endif libssh2_channel_free(channel); diff --git a/vendor/libssh2/example/scp_write.c b/vendor/libssh2/example/scp_write.c index eef6e811f9..030232ec6c 100644 --- a/vendor/libssh2/example/scp_write.c +++ b/vendor/libssh2/example/scp_write.c @@ -38,10 +38,10 @@ int main(int argc, char *argv[]) const char *fingerprint; LIBSSH2_SESSION *session = NULL; LIBSSH2_CHANNEL *channel; - const char *username="username"; - const char *password="password"; - const char *loclfile="scp_write.c"; - const char *scppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "scp_write.c"; + const char *scppath = "/tmp/TEST"; FILE *local; int rc; char mem[1024]; @@ -53,39 +53,40 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 
1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } if(argc > 4) { loclfile = argv[4]; } - if (argc > 5) { + if(argc > 5) { scppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't open local file %s\n", loclfile); return -1; } @@ -105,8 +106,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -138,18 +139,20 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) { +#define HOME "/home/username/" + if(libssh2_userauth_publickey_fromfile(session, username, + HOME ".ssh/id_rsa.pub", + HOME ".ssh/id_rsa", + password)) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -159,7 +162,7 @@ int main(int argc, char *argv[]) channel = libssh2_scp_send(session, scppath, fileinfo.st_mode & 0777, (unsigned long)fileinfo.st_size); - if 
(!channel) { + if(!channel) { char *errmsg; int errlen; int err = libssh2_session_last_error(session, &errmsg, &errlen, 0); @@ -170,7 +173,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "SCP session waiting to send file\n"); do { nread = fread(mem, 1, sizeof(mem), local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ break; } @@ -179,7 +182,7 @@ int main(int argc, char *argv[]) do { /* write the same data over and over, until error or completion */ rc = libssh2_channel_write(channel, ptr, nread); - if (rc < 0) { + if(rc < 0) { fprintf(stderr, "ERROR %d\n", rc); break; } @@ -188,9 +191,9 @@ int main(int argc, char *argv[]) ptr += rc; nread -= rc; } - } while (nread); + } while(nread); - } while (1); + } while(1); fprintf(stderr, "Sending EOF\n"); libssh2_channel_send_eof(channel); @@ -207,7 +210,7 @@ int main(int argc, char *argv[]) shutdown: if(session) { - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); } #ifdef WIN32 @@ -215,7 +218,7 @@ int main(int argc, char *argv[]) #else close(sock); #endif - if (local) + if(local) fclose(local); fprintf(stderr, "all done\n"); diff --git a/vendor/libssh2/example/scp_write_nonblock.c b/vendor/libssh2/example/scp_write_nonblock.c index bb8e39dcbb..9226322e9a 100644 --- a/vendor/libssh2/example/scp_write_nonblock.c +++ b/vendor/libssh2/example/scp_write_nonblock.c @@ -73,10 +73,10 @@ int main(int argc, char *argv[]) const char *fingerprint; LIBSSH2_SESSION *session = NULL; LIBSSH2_CHANNEL *channel; - const char *username="username"; - const char *password="password"; - const char *loclfile="scp_write.c"; - const char *scppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "scp_write.c"; + const char *scppath = "/tmp/TEST"; FILE *local; int rc; char mem[1024*100]; @@ -92,39 +92,40 @@ int main(int argc, char *argv[]) WSADATA 
wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } if(argc > 4) { loclfile = argv[4]; } - if (argc > 5) { + if(argc > 5) { scppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't local file %s\n", loclfile); return -1; } @@ -140,8 +141,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -158,8 +159,8 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) - == LIBSSH2_ERROR_EAGAIN); + while((rc = libssh2_session_handshake(session, sock)) + == LIBSSH2_ERROR_EAGAIN); if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; @@ -177,21 +178,24 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_password(session, username, password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { +#define HOME "/home/username/" + while((rc = libssh2_userauth_publickey_fromfile(session, username, + HOME ".ssh/id_rsa.pub", + HOME ".ssh/id_rsa", + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -202,21 +206,21 @@ int main(int argc, char *argv[]) channel = libssh2_scp_send(session, scppath, fileinfo.st_mode & 0777, (unsigned long)fileinfo.st_size); - if ((!channel) && (libssh2_session_last_errno(session) != - LIBSSH2_ERROR_EAGAIN)) { + if((!channel) && (libssh2_session_last_errno(session) != + LIBSSH2_ERROR_EAGAIN)) { char *err_msg; libssh2_session_last_error(session, &err_msg, NULL, 0); fprintf(stderr, "%s\n", err_msg); goto shutdown; } - } while (!channel); + } while(!channel); fprintf(stderr, "SCP session waiting to send file\n"); start = time(NULL); do { nread = fread(mem, 1, sizeof(mem), local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ break; } 
@@ -226,12 +230,12 @@ int main(int argc, char *argv[]) prev = 0; do { - while ((rc = libssh2_channel_write(channel, ptr, nread)) == - LIBSSH2_ERROR_EAGAIN) { + while((rc = libssh2_channel_write(channel, ptr, nread)) == + LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); prev = 0; } - if (rc < 0) { + if(rc < 0) { fprintf(stderr, "ERROR %d total %ld / %d prev %d\n", rc, total, (int)nread, (int)prev); break; @@ -243,8 +247,8 @@ int main(int argc, char *argv[]) nread -= rc; ptr += rc; } - } while (nread); - } while (!nread); /* only continue if nread was drained */ + } while(nread); + } while(!nread); /* only continue if nread was drained */ duration = (int)(time(NULL)-start); @@ -252,22 +256,22 @@ int main(int argc, char *argv[]) total, duration, total/(double)duration); fprintf(stderr, "Sending EOF\n"); - while (libssh2_channel_send_eof(channel) == LIBSSH2_ERROR_EAGAIN); + while(libssh2_channel_send_eof(channel) == LIBSSH2_ERROR_EAGAIN); fprintf(stderr, "Waiting for EOF\n"); - while (libssh2_channel_wait_eof(channel) == LIBSSH2_ERROR_EAGAIN); + while(libssh2_channel_wait_eof(channel) == LIBSSH2_ERROR_EAGAIN); fprintf(stderr, "Waiting for channel to close\n"); - while (libssh2_channel_wait_closed(channel) == LIBSSH2_ERROR_EAGAIN); + while(libssh2_channel_wait_closed(channel) == LIBSSH2_ERROR_EAGAIN); libssh2_channel_free(channel); channel = NULL; shutdown: - while (libssh2_session_disconnect(session, - "Normal Shutdown, Thank you for playing") == - LIBSSH2_ERROR_EAGAIN); + while(libssh2_session_disconnect(session, + "Normal Shutdown,") == + LIBSSH2_ERROR_EAGAIN); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftp.c b/vendor/libssh2/example/sftp.c index 0feb534d09..8f67244a6c 100644 --- a/vendor/libssh2/example/sftp.c +++ b/vendor/libssh2/example/sftp.c @@ -37,18 +37,19 @@ #include -const char *keyfile1="~/.ssh/id_rsa.pub"; -const char *keyfile2="~/.ssh/id_rsa"; -const char *username="username"; -const char *password="password"; 
-const char *sftppath="/tmp/TEST"; - - -static void kbd_callback(const char *name, int name_len, - const char *instruction, int instruction_len, int num_prompts, - const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, - LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, - void **abstract) +const char *keyfile1 = "~/.ssh/id_rsa.pub"; +const char *keyfile2 = "~/.ssh/id_rsa"; +const char *username = "username"; +const char *password = "password"; +const char *sftppath = "/tmp/TEST"; + + +static void kbd_callback(const char *name, int name_len, + const char *instruction, int instruction_len, + int num_prompts, + const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, + LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, + void **abstract) { int i; size_t n; @@ -67,7 +68,7 @@ static void kbd_callback(const char *name, int name_len, fprintf(stderr, "Number of prompts: %d\n\n", num_prompts); - for (i = 0; i < num_prompts; i++) { + for(i = 0; i < num_prompts; i++) { fprintf(stderr, "Prompt %d from server: '", i); fwrite(prompts[i].text, 1, prompts[i].length, stderr); fprintf(stderr, "'\n"); @@ -75,7 +76,7 @@ static void kbd_callback(const char *name, int name_len, fprintf(stderr, "Please type response: "); fgets(buf, sizeof(buf), stdin); n = strlen(buf); - while (n > 0 && strchr("\r\n", buf[n - 1])) + while(n > 0 && strchr("\r\n", buf[n - 1])) n--; buf[n] = 0; @@ -108,16 +109,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -131,8 +133,8 @@ int main(int argc, char *argv[]) sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { + rc = libssh2_init(0); + if(rc != 0) { fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -146,8 
+148,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -185,54 +187,61 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password") != NULL) { + if(strstr(userauthlist, "password") != NULL) { auth_pw |= 1; } - if (strstr(userauthlist, "keyboard-interactive") != NULL) { + if(strstr(userauthlist, "keyboard-interactive") != NULL) { auth_pw |= 2; } - if (strstr(userauthlist, "publickey") != NULL) { + if(strstr(userauthlist, "publickey") != NULL) { auth_pw |= 4; } - /* if we got an 4. argument we set this option if supported */ + /* if we got an 4. 
argument we set this option if supported */ if(argc > 5) { - if ((auth_pw & 1) && !strcasecmp(argv[5], "-p")) { + if((auth_pw & 1) && !strcasecmp(argv[5], "-p")) { auth_pw = 1; } - if ((auth_pw & 2) && !strcasecmp(argv[5], "-i")) { + if((auth_pw & 2) && !strcasecmp(argv[5], "-i")) { auth_pw = 2; } - if ((auth_pw & 4) && !strcasecmp(argv[5], "-k")) { + if((auth_pw & 4) && !strcasecmp(argv[5], "-k")) { auth_pw = 4; } } - if (auth_pw & 1) { + if(auth_pw & 1) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else if (auth_pw & 2) { + } + else if(auth_pw & 2) { /* Or via keyboard-interactive */ - if (libssh2_userauth_keyboard_interactive(session, username, &kbd_callback) ) { + if(libssh2_userauth_keyboard_interactive(session, username, + &kbd_callback) ) { fprintf(stderr, "\tAuthentication by keyboard-interactive failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by keyboard-interactive succeeded.\n"); } - } else if (auth_pw & 4) { + } + else if(auth_pw & 4) { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, keyfile2, password)) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, + keyfile2, password)) { fprintf(stderr, "\tAuthentication by public key failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by public key succeeded.\n"); } - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } @@ -240,7 +249,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_init()!\n"); sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -250,7 +259,7 @@ int main(int argc, char *argv[]) 
sftp_handle = libssh2_sftp_open(sftp_session, sftppath, LIBSSH2_FXF_READ, 0); - if (!sftp_handle) { + if(!sftp_handle) { fprintf(stderr, "Unable to open file with SFTP: %ld\n", libssh2_sftp_last_error(sftp_session)); goto shutdown; @@ -262,19 +271,20 @@ int main(int argc, char *argv[]) /* loop until we fail */ fprintf(stderr, "libssh2_sftp_read()!\n"); rc = libssh2_sftp_read(sftp_handle, mem, sizeof(mem)); - if (rc > 0) { + if(rc > 0) { write(1, mem, rc); - } else { + } + else { break; } - } while (1); + } while(1); libssh2_sftp_close(sftp_handle); libssh2_sftp_shutdown(sftp_session); shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftp_RW_nonblock.c b/vendor/libssh2/example/sftp_RW_nonblock.c index 133815aa1a..70d87db35d 100644 --- a/vendor/libssh2/example/sftp_RW_nonblock.c +++ b/vendor/libssh2/example/sftp_RW_nonblock.c @@ -79,10 +79,10 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *sftppath="/tmp/TEST"; /* source path */ - const char *dest="/tmp/TEST2"; /* destination path */ + const char *username = "username"; + const char *password = "password"; + const char *sftppath = "/tmp/TEST"; /* source path */ + const char *dest = "/tmp/TEST2"; /* destination path */ int rc; LIBSSH2_SFTP *sftp_session; LIBSSH2_SFTP_HANDLE *sftp_handle; @@ -90,21 +90,22 @@ int main(int argc, char *argv[]) char mem[1000]; struct timeval timeout; fd_set fd; + fd_set fd2; #ifdef WIN32 WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - rc = libssh2_init (0); - if (rc != 0) { - fprintf 
(stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -117,7 +118,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = htonl(0x7F000001); - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -171,25 +172,26 @@ int main(int argc, char *argv[]) goto shutdown; } - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) + while((rc = libssh2_userauth_password(session, username, password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = - libssh2_userauth_publickey_fromfile(session, username, - "/home/username/" - ".ssh/id_rsa.pub", - "/home/username/" - ".ssh/id_rsa", - password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = + libssh2_userauth_publickey_fromfile(session, username, + "/home/username/" + ".ssh/id_rsa.pub", + "/home/username/" + ".ssh/id_rsa", + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -209,15 +211,15 @@ int main(int argc, char *argv[]) goto shutdown; } } - } while (!sftp_session); + } while(!sftp_session); /* Request a file via SFTP */ do { sftp_handle = libssh2_sftp_open(sftp_session, sftppath, LIBSSH2_FXF_READ, 0); - if (!sftp_handle) { - if (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { + if(!sftp_handle) { + if(libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } @@ -226,7 +228,7 @@ int main(int argc, char *argv[]) waitsocket(sock, 
session); /* now we wait */ } } - } while (!sftp_handle); + } while(!sftp_handle); fprintf(stderr, "libssh2_sftp_open() is done, now receive data!\n"); do { @@ -242,7 +244,7 @@ int main(int argc, char *argv[]) /* write to temporary storage area */ fwrite(mem, rc, 1, tempstorage); } - } while (rc > 0); + } while(rc > 0); if(rc != LIBSSH2_ERROR_EAGAIN) { /* error or end of file */ @@ -253,11 +255,12 @@ int main(int argc, char *argv[]) timeout.tv_usec = 0; FD_ZERO(&fd); - + FD_ZERO(&fd2); FD_SET(sock, &fd); + FD_SET(sock, &fd2); /* wait for readable or writeable */ - rc = select(sock+1, &fd, &fd, NULL, &timeout); + rc = select(sock + 1, &fd, &fd2, NULL, &timeout); if(rc <= 0) { /* negative is error 0 is timeout */ @@ -265,7 +268,7 @@ int main(int argc, char *argv[]) break; } - } while (1); + } while(1); libssh2_sftp_close(sftp_handle); fclose(tempstorage); @@ -301,7 +304,7 @@ int main(int argc, char *argv[]) nread); ptr += rc; nread -= nread; - } while (rc >= 0); + } while(rc >= 0); if(rc != LIBSSH2_ERROR_EAGAIN) { /* error or end of file */ @@ -312,11 +315,12 @@ int main(int argc, char *argv[]) timeout.tv_usec = 0; FD_ZERO(&fd); - + FD_ZERO(&fd2); FD_SET(sock, &fd); + FD_SET(sock, &fd2); /* wait for readable or writeable */ - rc = select(sock+1, &fd, &fd, NULL, &timeout); + rc = select(sock + 1, &fd, &fd2, NULL, &timeout); if(rc <= 0) { /* negative is error 0 is timeout */ @@ -324,7 +328,7 @@ int main(int argc, char *argv[]) rc); break; } - } while (1); + } while(1); fprintf(stderr, "SFTP upload done!\n"); } else { @@ -336,7 +340,7 @@ int main(int argc, char *argv[]) shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 @@ -344,7 +348,7 @@ int main(int argc, char *argv[]) #else close(sock); #endif - if (tempstorage) + if(tempstorage) fclose(tempstorage); fprintf(stderr, "all done\n"); diff --git 
a/vendor/libssh2/example/sftp_append.c b/vendor/libssh2/example/sftp_append.c index 788c51f5af..bfea1f7277 100644 --- a/vendor/libssh2/example/sftp_append.c +++ b/vendor/libssh2/example/sftp_append.c @@ -40,10 +40,10 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *loclfile="sftp_write.c"; - const char *sftppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "sftp_write.c"; + const char *sftppath = "/tmp/TEST"; int rc; FILE *local; LIBSSH2_SFTP *sftp_session; @@ -57,16 +57,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -83,14 +84,14 @@ int main(int argc, char *argv[]) sftppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't open local file %s\n", loclfile); return -1; } @@ -104,8 +105,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -140,18 +141,20 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via 
password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) { +#define HOME "/home/username/" + if(libssh2_userauth_publickey_fromfile(session, username, + HOME ".ssh/id_rsa.pub", + HOME ".ssh/id_rsa", + password)) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -160,7 +163,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_init()!\n"); sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -173,7 +176,7 @@ int main(int argc, char *argv[]) LIBSSH2_FXF_WRITE|LIBSSH2_FXF_READ, LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); - if (!sftp_handle) { + if(!sftp_handle) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } @@ -188,14 +191,14 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_open() a handle for APPEND\n"); - if (!sftp_handle) { + if(!sftp_handle) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } fprintf(stderr, "libssh2_sftp_open() is done, now send data!\n"); do { nread = fread(mem, 1, sizeof(mem), local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ break; } @@ -208,9 +211,9 @@ int main(int argc, char *argv[]) break; ptr += rc; nread -= rc; - } while (nread); + } while(nread); - } while (rc > 0); + } while(rc > 0); libssh2_sftp_close(sftp_handle); libssh2_sftp_shutdown(sftp_session); @@ -225,7 +228,7 @@ int main(int argc, char *argv[]) #else close(sock); #endif - if (local) + if(local) fclose(local); fprintf(stderr, "all done\n"); diff --git 
a/vendor/libssh2/example/sftp_mkdir.c b/vendor/libssh2/example/sftp_mkdir.c index 1270adb032..2347abe425 100644 --- a/vendor/libssh2/example/sftp_mkdir.c +++ b/vendor/libssh2/example/sftp_mkdir.c @@ -40,9 +40,9 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *sftppath="/tmp/sftp_mkdir"; + const char *username = "username"; + const char *password = "password"; + const char *sftppath = "/tmp/sftp_mkdir"; int rc; LIBSSH2_SFTP *sftp_session; @@ -50,16 +50,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -73,9 +74,9 @@ int main(int argc, char *argv[]) sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -88,7 +89,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -121,15 +122,16 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public 
key */ - if (libssh2_userauth_publickey_fromfile(session, username, + if(libssh2_userauth_publickey_fromfile(session, username, "/home/username/.ssh/id_rsa.pub", "/home/username/.ssh/id_rsa", password)) { @@ -140,7 +142,7 @@ int main(int argc, char *argv[]) sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -161,7 +163,7 @@ int main(int argc, char *argv[]) shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftp_mkdir_nonblock.c b/vendor/libssh2/example/sftp_mkdir_nonblock.c index db366d22d6..217cc4b3da 100644 --- a/vendor/libssh2/example/sftp_mkdir_nonblock.c +++ b/vendor/libssh2/example/sftp_mkdir_nonblock.c @@ -40,9 +40,9 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *sftppath="/tmp/sftp_mkdir_nonblock"; + const char *username = "username"; + const char *password = "password"; + const char *sftppath = "/tmp/sftp_mkdir_nonblock"; int rc; LIBSSH2_SFTP *sftp_session; @@ -50,16 +50,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -73,9 +74,9 @@ int main(int argc, char *argv[]) sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); 
return 1; } @@ -88,7 +89,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -121,15 +122,16 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, + if(libssh2_userauth_publickey_fromfile(session, username, "/home/username/.ssh/id_rsa.pub", "/home/username/.ssh/id_rsa", password)) { @@ -141,7 +143,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_init()!\n"); sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -151,7 +153,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_mkdirnb()!\n"); /* Make a directory via SFTP */ - while (libssh2_sftp_mkdir(sftp_session, sftppath, + while(libssh2_sftp_mkdir(sftp_session, sftppath, LIBSSH2_SFTP_S_IRWXU| LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IXGRP| LIBSSH2_SFTP_S_IROTH|LIBSSH2_SFTP_S_IXOTH) @@ -161,7 +163,7 @@ int main(int argc, char *argv[]) shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftp_nonblock.c b/vendor/libssh2/example/sftp_nonblock.c index 0db0cb6383..8ef091e1f2 100644 --- a/vendor/libssh2/example/sftp_nonblock.c +++ b/vendor/libssh2/example/sftp_nonblock.c @@ -85,9 +85,9 
@@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *sftppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *sftppath = "/tmp/TEST"; #ifdef HAVE_GETTIMEOFDAY struct timeval start; struct timeval end; @@ -103,32 +103,33 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -141,7 +142,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -149,7 +150,7 @@ int main(int argc, char *argv[]) /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) return -1; /* Since we have set non-blocking, tell libssh2 we are non-blocking */ @@ -162,9 +163,9 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) == + while((rc = libssh2_session_handshake(session, sock)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -181,25 +182,26 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) + while((rc = libssh2_userauth_password(session, username, password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = - libssh2_userauth_publickey_fromfile(session, username, - "/home/username/" - ".ssh/id_rsa.pub", - "/home/username/" - ".ssh/id_rsa", - password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = + libssh2_userauth_publickey_fromfile(session, username, + "/home/username/" + ".ssh/id_rsa.pub", + "/home/username/" + ".ssh/id_rsa", + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -222,7 +224,7 @@ int main(int argc, char *argv[]) goto shutdown; } } - } while (!sftp_session); + } while(!sftp_session); fprintf(stderr, "libssh2_sftp_open()!\n"); /* Request a file via SFTP */ @@ -230,8 +232,8 @@ int main(int argc, char *argv[]) sftp_handle = libssh2_sftp_open(sftp_session, sftppath, LIBSSH2_FXF_READ, 0); - if (!sftp_handle) { - if (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { + if(!sftp_handle) { + if(libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } @@ -240,31 +242,33 @@ int main(int argc, char *argv[]) waitsocket(sock, session); /* now we wait */ } } - } while (!sftp_handle); + } 
while(!sftp_handle); fprintf(stderr, "libssh2_sftp_open() is done, now receive data!\n"); do { char mem[1024*24]; /* loop until we fail */ - while ((rc = libssh2_sftp_read(sftp_handle, mem, + while((rc = libssh2_sftp_read(sftp_handle, mem, sizeof(mem))) == LIBSSH2_ERROR_EAGAIN) { spin++; waitsocket(sock, session); /* now we wait */ } - if (rc > 0) { + if(rc > 0) { total += rc; write(1, mem, rc); - } else { + } + else { break; } - } while (1); + } while(1); #ifdef HAVE_GETTIMEOFDAY gettimeofday(&end, NULL); time_ms = tvdiff(end, start); - fprintf(stderr, "Got %d bytes in %ld ms = %.1f bytes/sec spin: %d\n", total, - time_ms, total/(time_ms/1000.0), spin ); + fprintf(stderr, "Got %d bytes in %ld ms = %.1f bytes/sec spin: %d\n", + total, + time_ms, total/(time_ms/1000.0), spin); #else fprintf(stderr, "Got %d bytes spin: %d\n", total, spin); #endif @@ -275,7 +279,7 @@ int main(int argc, char *argv[]) shutdown: fprintf(stderr, "libssh2_session_disconnect\n"); - while (libssh2_session_disconnect(session, + while(libssh2_session_disconnect(session, "Normal Shutdown, Thank you") == LIBSSH2_ERROR_EAGAIN); libssh2_session_free(session); diff --git a/vendor/libssh2/example/sftp_write.c b/vendor/libssh2/example/sftp_write.c index 7afc187f18..c1350e9cb5 100644 --- a/vendor/libssh2/example/sftp_write.c +++ b/vendor/libssh2/example/sftp_write.c @@ -40,10 +40,10 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *loclfile="sftp_write.c"; - const char *sftppath="/tmp/TEST"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "sftp_write.c"; + const char *sftppath = "/tmp/TEST"; int rc; FILE *local; LIBSSH2_SFTP *sftp_session; @@ -56,16 +56,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 
0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -82,14 +83,14 @@ int main(int argc, char *argv[]) sftppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't open local file %s\n", loclfile); return -1; } @@ -103,7 +104,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -139,18 +140,20 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) { + const char *pubkey = "/home/username/.ssh/id_rsa.pub"; + const char *privkey = "/home/username/.ssh/id_rsa.pub"; + if(libssh2_userauth_publickey_fromfile(session, username, + pubkey, privkey, + password)) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -159,7 +162,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_init()!\n"); sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { 
fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -172,14 +175,14 @@ int main(int argc, char *argv[]) LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); - if (!sftp_handle) { + if(!sftp_handle) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } fprintf(stderr, "libssh2_sftp_open() is done, now send data!\n"); do { nread = fread(mem, 1, sizeof(mem), local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ break; } @@ -192,9 +195,9 @@ int main(int argc, char *argv[]) break; ptr += rc; nread -= rc; - } while (nread); + } while(nread); - } while (rc > 0); + } while(rc > 0); libssh2_sftp_close(sftp_handle); libssh2_sftp_shutdown(sftp_session); @@ -209,7 +212,7 @@ int main(int argc, char *argv[]) #else close(sock); #endif - if (local) + if(local) fclose(local); fprintf(stderr, "all done\n"); diff --git a/vendor/libssh2/example/sftp_write_nonblock.c b/vendor/libssh2/example/sftp_write_nonblock.c index 2e22395e9c..934749ea73 100644 --- a/vendor/libssh2/example/sftp_write_nonblock.c +++ b/vendor/libssh2/example/sftp_write_nonblock.c @@ -4,7 +4,7 @@ * The sample code has default values for host name, user name, password * and path to copy, but you can specify them on the command line like: * - * "sftp 192.168.0.1 user password sftp_write_nonblock.c /tmp/sftp_write_nonblock.c" + * "sftp 192.168.0.1 user password thisfile /tmp/storehere" */ #include "libssh2_config.h" @@ -77,10 +77,10 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *loclfile="sftp_write_nonblock.c"; - const char *sftppath="/tmp/sftp_write_nonblock.c"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "sftp_write_nonblock.c"; + const char *sftppath = "/tmp/sftp_write_nonblock.c"; int rc; FILE *local; LIBSSH2_SFTP *sftp_session; @@ -96,40 
+96,41 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { loclfile = argv[4]; } - if (argc > 5) { + if(argc > 5) { sftppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't open local file %s\n", loclfile); return -1; } @@ -143,7 +144,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -152,7 +153,7 @@ int main(int argc, char *argv[]) /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) return -1; /* Since we have set non-blocking, tell libssh2 we are non-blocking */ @@ -161,9 +162,9 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) + while((rc = libssh2_session_handshake(session, sock)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -180,22 +181,24 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == + while((rc = libssh2_userauth_password(session, username, password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + const char *pubkey = "/home/username/.ssh/id_rsa.pub"; + const char *privkey = "/home/username/.ssh/id_rsa"; + while((rc = libssh2_userauth_publickey_fromfile(session, username, + pubkey, privkey, + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -205,28 +208,28 @@ int main(int argc, char *argv[]) do { sftp_session = libssh2_sftp_init(session); - if (!sftp_session && + if(!sftp_session && (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } - } while (!sftp_session); + } while(!sftp_session); fprintf(stderr, "libssh2_sftp_open()!\n"); /* Request a file via SFTP */ do { sftp_handle = - libssh2_sftp_open(sftp_session, sftppath, - LIBSSH2_FXF_WRITE|LIBSSH2_FXF_CREAT|LIBSSH2_FXF_TRUNC, - LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| - LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); - - if (!sftp_handle && - 
(libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { + libssh2_sftp_open(sftp_session, sftppath, + LIBSSH2_FXF_WRITE|LIBSSH2_FXF_CREAT| + LIBSSH2_FXF_TRUNC, + LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| + LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); + if(!sftp_handle && + (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } - } while (!sftp_handle); + } while(!sftp_handle); fprintf(stderr, "libssh2_sftp_open() is done, now send data!\n"); @@ -234,7 +237,7 @@ int main(int argc, char *argv[]) do { nread = fread(mem, 1, sizeof(mem), local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ break; } @@ -244,7 +247,7 @@ int main(int argc, char *argv[]) do { /* write data in a loop until we block */ - while ((rc = libssh2_sftp_write(sftp_handle, ptr, nread)) == + while((rc = libssh2_sftp_write(sftp_handle, ptr, nread)) == LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); } @@ -253,8 +256,8 @@ int main(int argc, char *argv[]) ptr += rc; nread -= rc; - } while (nread); - } while (rc > 0); + } while(nread); + } while(rc > 0); duration = (int)(time(NULL)-start); @@ -268,7 +271,7 @@ int main(int argc, char *argv[]) shutdown: - while (libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing") + while(libssh2_session_disconnect(session, "Normal Shutdown") == LIBSSH2_ERROR_EAGAIN); libssh2_session_free(session); diff --git a/vendor/libssh2/example/sftp_write_sliding.c b/vendor/libssh2/example/sftp_write_sliding.c index 19fe851a25..9a72140f44 100644 --- a/vendor/libssh2/example/sftp_write_sliding.c +++ b/vendor/libssh2/example/sftp_write_sliding.c @@ -4,7 +4,7 @@ * The sample code has default values for host name, user name, password * and path to copy, but you can specify them on the command line like: * - * "sftp 192.168.0.1 user password sftp_write_nonblock.c /tmp/sftp_write_nonblock.c" + * "sftp 192.168.0.1 user password file /tmp/storehere" */ #include 
"libssh2_config.h" @@ -77,10 +77,10 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *loclfile="sftp_write_nonblock.c"; - const char *sftppath="/tmp/sftp_write_nonblock.c"; + const char *username = "username"; + const char *password = "password"; + const char *loclfile = "sftp_write_nonblock.c"; + const char *sftppath = "/tmp/sftp_write_nonblock.c"; int rc; FILE *local; LIBSSH2_SFTP *sftp_session; @@ -96,40 +96,41 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { loclfile = argv[4]; } - if (argc > 5) { + if(argc > 5) { sftppath = argv[5]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } local = fopen(loclfile, "rb"); - if (!local) { + if(!local) { fprintf(stderr, "Can't open local file %s\n", loclfile); return -1; } @@ -143,7 +144,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -152,7 +153,7 @@ int main(int argc, char *argv[]) /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) 
return -1; /* Since we have set non-blocking, tell libssh2 we are non-blocking */ @@ -161,9 +162,9 @@ int main(int argc, char *argv[]) /* ... start it up. This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) + while((rc = libssh2_session_handshake(session, sock)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -180,22 +181,24 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == + while((rc = libssh2_userauth_password(session, username, password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { +#define PUBKEY "/home/username/.ssh/id_rsa.pub" +#define PRIVKEY "/home/username/.ssh/id_rsa" + while((rc = libssh2_userauth_publickey_fromfile(session, username, + PUBKEY, PRIVKEY, + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -205,28 +208,29 @@ int main(int argc, char *argv[]) do { sftp_session = libssh2_sftp_init(session); - if (!sftp_session && + if(!sftp_session && (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } - } while (!sftp_session); + } while(!sftp_session); fprintf(stderr, "libssh2_sftp_open()!\n"); /* Request a file via SFTP */ do { sftp_handle = - libssh2_sftp_open(sftp_session, sftppath, - LIBSSH2_FXF_WRITE|LIBSSH2_FXF_CREAT|LIBSSH2_FXF_TRUNC, - 
LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| - LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); - - if (!sftp_handle && - (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { + libssh2_sftp_open(sftp_session, sftppath, + LIBSSH2_FXF_WRITE|LIBSSH2_FXF_CREAT| + LIBSSH2_FXF_TRUNC, + LIBSSH2_SFTP_S_IRUSR|LIBSSH2_SFTP_S_IWUSR| + LIBSSH2_SFTP_S_IRGRP|LIBSSH2_SFTP_S_IROTH); + + if(!sftp_handle && + (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN)) { fprintf(stderr, "Unable to open file with SFTP\n"); goto shutdown; } - } while (!sftp_handle); + } while(!sftp_handle); fprintf(stderr, "libssh2_sftp_open() is done, now send data!\n"); @@ -235,9 +239,9 @@ int main(int argc, char *argv[]) memuse = 0; /* it starts blank */ do { nread = fread(&mem[memuse], 1, sizeof(mem)-memuse, local); - if (nread <= 0) { + if(nread <= 0) { /* end of file */ - if (memuse > 0) + if(memuse > 0) /* the previous sending is not finished */ nread = 0; else @@ -247,7 +251,7 @@ int main(int argc, char *argv[]) total += nread; /* write data in a loop until we block */ - while ((rc = libssh2_sftp_write(sftp_handle, mem, memuse)) == + while((rc = libssh2_sftp_write(sftp_handle, mem, memuse)) == LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); } @@ -263,7 +267,7 @@ int main(int argc, char *argv[]) /* 'mem' was consumed fully */ memuse = 0; - } while (rc > 0); + } while(rc > 0); duration = (int)(time(NULL)-start); @@ -277,8 +281,8 @@ int main(int argc, char *argv[]) shutdown: - while (libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing") - == LIBSSH2_ERROR_EAGAIN); + while(libssh2_session_disconnect(session, "Normal Shutdown") + == LIBSSH2_ERROR_EAGAIN); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftpdir.c b/vendor/libssh2/example/sftpdir.c index c21f9b35de..8fb16b1e98 100644 --- a/vendor/libssh2/example/sftpdir.c +++ b/vendor/libssh2/example/sftpdir.c @@ -36,26 +36,16 @@ #include #include -/* last resort for systems not 
defining PRIu64 in inttypes.h */ -#ifndef __PRI64_PREFIX #ifdef WIN32 -#define __PRI64_PREFIX "I64" +#define __FILESIZE "I64" #else -#if __WORDSIZE == 64 -#define __PRI64_PREFIX "l" -#else -#define __PRI64_PREFIX "ll" -#endif /* __WORDSIZE */ -#endif /* WIN32 */ -#endif /* !__PRI64_PREFIX */ -#ifndef PRIu64 -#define PRIu64 __PRI64_PREFIX "u" -#endif /* PRIu64 */ - -const char *keyfile1="~/.ssh/id_rsa.pub"; -const char *keyfile2="~/.ssh/id_rsa"; -const char *username="username"; -const char *password="password"; +#define __FILESIZE "llu" +#endif + +const char *keyfile1 = "~/.ssh/id_rsa.pub"; +const char *keyfile2 = "~/.ssh/id_rsa"; +const char *username = "username"; +const char *password = "password"; static void kbd_callback(const char *name, int name_len, const char *instruction, int instruction_len, @@ -68,7 +58,7 @@ static void kbd_callback(const char *name, int name_len, (void)name_len; (void)instruction; (void)instruction_len; - if (num_prompts == 1) { + if(num_prompts == 1) { responses[0].text = strdup(password); responses[0].length = strlen(password); } @@ -84,7 +74,7 @@ int main(int argc, char *argv[]) const char *fingerprint; char *userauthlist; LIBSSH2_SESSION *session; - const char *sftppath="/tmp/secretdir"; + const char *sftppath = "/tmp/secretdir"; LIBSSH2_SFTP *sftp_session; LIBSSH2_SFTP_HANDLE *sftp_handle; @@ -92,16 +82,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -115,9 +106,9 @@ int main(int argc, char *argv[]) sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 
initialization failed (%d)\n", rc); return 1; } @@ -130,8 +121,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -166,58 +157,64 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password") != NULL) { + if(strstr(userauthlist, "password") != NULL) { auth_pw |= 1; } - if (strstr(userauthlist, "keyboard-interactive") != NULL) { + if(strstr(userauthlist, "keyboard-interactive") != NULL) { auth_pw |= 2; } - if (strstr(userauthlist, "publickey") != NULL) { + if(strstr(userauthlist, "publickey") != NULL) { auth_pw |= 4; } /* if we got an 5. 
argument we set this option if supported */ if(argc > 5) { - if ((auth_pw & 1) && !strcasecmp(argv[5], "-p")) { + if((auth_pw & 1) && !strcasecmp(argv[5], "-p")) { auth_pw = 1; } - if ((auth_pw & 2) && !strcasecmp(argv[5], "-i")) { + if((auth_pw & 2) && !strcasecmp(argv[5], "-i")) { auth_pw = 2; } - if ((auth_pw & 4) && !strcasecmp(argv[5], "-k")) { + if((auth_pw & 4) && !strcasecmp(argv[5], "-k")) { auth_pw = 4; } } - if (auth_pw & 1) { + if(auth_pw & 1) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "\tAuthentication by password failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by password succeeded.\n"); } - } else if (auth_pw & 2) { + } + else if(auth_pw & 2) { /* Or via keyboard-interactive */ - if (libssh2_userauth_keyboard_interactive(session, username, - &kbd_callback) ) { + if(libssh2_userauth_keyboard_interactive(session, username, + &kbd_callback) ) { fprintf(stderr, - "\tAuthentication by keyboard-interactive failed!\n"); + "\tAuthentication by keyboard-interactive failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, - "\tAuthentication by keyboard-interactive succeeded.\n"); + "\tAuthentication by keyboard-interactive succeeded.\n"); } - } else if (auth_pw & 4) { + } + else if(auth_pw & 4) { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, - keyfile2, password)) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, + keyfile2, password)) { fprintf(stderr, "\tAuthentication by public key failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by public key succeeded.\n"); } - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } @@ -225,7 +222,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "libssh2_sftp_init()!\n"); 
sftp_session = libssh2_sftp_init(session); - if (!sftp_session) { + if(!sftp_session) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } @@ -237,7 +234,7 @@ int main(int argc, char *argv[]) /* Request a dir listing via SFTP */ sftp_handle = libssh2_sftp_opendir(sftp_session, sftppath); - if (!sftp_handle) { + if(!sftp_handle) { fprintf(stderr, "Unable to open dir with SFTP\n"); goto shutdown; } @@ -254,9 +251,10 @@ int main(int argc, char *argv[]) /* rc is the length of the file name in the mem buffer */ - if (longentry[0] != '\0') { + if(longentry[0] != '\0') { printf("%s\n", longentry); - } else { + } + else { if(attrs.flags & LIBSSH2_SFTP_ATTR_PERMISSIONS) { /* this should check what permissions it is and print the output accordingly */ @@ -267,14 +265,14 @@ int main(int argc, char *argv[]) } if(attrs.flags & LIBSSH2_SFTP_ATTR_UIDGID) { - printf("%4ld %4ld ", attrs.uid, attrs.gid); + printf("%4d %4d ", (int) attrs.uid, (int) attrs.gid); } else { printf(" - - "); } if(attrs.flags & LIBSSH2_SFTP_ATTR_SIZE) { - printf("%8" PRIu64 " ", attrs.filesize); + printf("%8" __FILESIZE " ", attrs.filesize); } printf("%s\n", mem); @@ -283,14 +281,14 @@ int main(int argc, char *argv[]) else break; - } while (1); + } while(1); libssh2_sftp_closedir(sftp_handle); libssh2_sftp_shutdown(sftp_session); shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/sftpdir_nonblock.c b/vendor/libssh2/example/sftpdir_nonblock.c index 1950e6712e..e9498d97cb 100644 --- a/vendor/libssh2/example/sftpdir_nonblock.c +++ b/vendor/libssh2/example/sftpdir_nonblock.c @@ -36,21 +36,11 @@ #include #include -/* last resort for systems not defining PRIu64 in inttypes.h */ -#ifndef __PRI64_PREFIX #ifdef WIN32 -#define __PRI64_PREFIX "I64" +#define __FILESIZE "I64" #else -#if __WORDSIZE == 64 -#define __PRI64_PREFIX 
"l" -#else -#define __PRI64_PREFIX "ll" -#endif /* __WORDSIZE */ -#endif /* WIN32 */ -#endif /* !__PRI64_PREFIX */ -#ifndef PRIu64 -#define PRIu64 __PRI64_PREFIX "u" -#endif /* PRIu64 */ +#define __FILESIZE "llu" +#endif int main(int argc, char *argv[]) { @@ -59,9 +49,11 @@ int main(int argc, char *argv[]) struct sockaddr_in sin; const char *fingerprint; LIBSSH2_SESSION *session; - const char *username="username"; - const char *password="password"; - const char *sftppath="/tmp/secretdir"; + const char *username = "username"; + const char *password = "password"; + const char *sftppath = "/tmp/secretdir"; + const char *pubkey = "/home/username/.ssh/id_rsa.pub"; + const char *privkey = "/home/username/.ssh/id_rsa"; int rc; LIBSSH2_SFTP *sftp_session; LIBSSH2_SFTP_HANDLE *sftp_handle; @@ -70,16 +62,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -93,9 +86,9 @@ int main(int argc, char *argv[]) sftppath = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -108,8 +101,8 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), - sizeof(struct sockaddr_in)) != 0) { + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; } @@ -126,8 +119,8 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) == - LIBSSH2_ERROR_EAGAIN); + while((rc = libssh2_session_handshake(session, sock)) == + LIBSSH2_ERROR_EAGAIN); if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; @@ -145,21 +138,22 @@ int main(int argc, char *argv[]) } fprintf(stderr, "\n"); - if (auth_pw) { + if(auth_pw) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_password(session, username, password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else { + } + else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, - "/home/username/.ssh/id_rsa.pub", - "/home/username/.ssh/id_rsa", - password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_publickey_fromfile(session, username, + pubkey, privkey, + password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } @@ -169,24 +163,24 @@ int main(int argc, char *argv[]) do { sftp_session = libssh2_sftp_init(session); - if ((!sftp_session) && (libssh2_session_last_errno(session) != - LIBSSH2_ERROR_EAGAIN)) { + if((!sftp_session) && (libssh2_session_last_errno(session) != + LIBSSH2_ERROR_EAGAIN)) { fprintf(stderr, "Unable to init SFTP session\n"); goto shutdown; } - } while (!sftp_session); + } while(!sftp_session); fprintf(stderr, "libssh2_sftp_opendir()!\n"); /* Request a dir listing via SFTP */ do { sftp_handle = libssh2_sftp_opendir(sftp_session, sftppath); - if ((!sftp_handle) && (libssh2_session_last_errno(session) != - LIBSSH2_ERROR_EAGAIN)) { + if((!sftp_handle) && (libssh2_session_last_errno(session) != + LIBSSH2_ERROR_EAGAIN)) { 
fprintf(stderr, "Unable to open dir with SFTP\n"); goto shutdown; } - } while (!sftp_handle); + } while(!sftp_handle); fprintf(stderr, "libssh2_sftp_opendir() is done, now receive listing!\n"); do { @@ -194,8 +188,8 @@ int main(int argc, char *argv[]) LIBSSH2_SFTP_ATTRIBUTES attrs; /* loop until we fail */ - while ((rc = libssh2_sftp_readdir(sftp_handle, mem, sizeof(mem), - &attrs)) == LIBSSH2_ERROR_EAGAIN) { + while((rc = libssh2_sftp_readdir(sftp_handle, mem, sizeof(mem), + &attrs)) == LIBSSH2_ERROR_EAGAIN) { ; } if(rc > 0) { @@ -206,37 +200,40 @@ int main(int argc, char *argv[]) /* this should check what permissions it is and print the output accordingly */ printf("--fix----- "); - } else { + } + else { printf("---------- "); } if(attrs.flags & LIBSSH2_SFTP_ATTR_UIDGID) { - printf("%4ld %4ld ", attrs.uid, attrs.gid); - } else { + printf("%4d %4d ", (int) attrs.uid, (int) attrs.gid); + } + else { printf(" - - "); } if(attrs.flags & LIBSSH2_SFTP_ATTR_SIZE) { - printf("%8" PRIu64 " ", attrs.filesize); + printf("%8" __FILESIZE " ", attrs.filesize); } printf("%s\n", mem); } - else if (rc == LIBSSH2_ERROR_EAGAIN) { + else if(rc == LIBSSH2_ERROR_EAGAIN) { /* blocking */ fprintf(stderr, "Blocking\n"); - } else { + } + else { break; } - } while (1); + } while(1); libssh2_sftp_closedir(sftp_handle); libssh2_sftp_shutdown(sftp_session); shutdown: - libssh2_session_disconnect(session, "Normal Shutdown, Thank you for playing"); + libssh2_session_disconnect(session, "Normal Shutdown"); libssh2_session_free(session); #ifdef WIN32 diff --git a/vendor/libssh2/example/ssh2.c b/vendor/libssh2/example/ssh2.c index f9ee68aec6..fa86f55139 100644 --- a/vendor/libssh2/example/ssh2.c +++ b/vendor/libssh2/example/ssh2.c @@ -37,10 +37,10 @@ #include -const char *keyfile1="~/.ssh/id_rsa.pub"; -const char *keyfile2="~/.ssh/id_rsa"; -const char *username="username"; -const char *password="password"; +const char *keyfile1 = "~/.ssh/id_rsa.pub"; +const char *keyfile2 = "~/.ssh/id_rsa"; +const 
char *username = "username"; +const char *password = "password"; static void kbd_callback(const char *name, int name_len, @@ -54,7 +54,7 @@ static void kbd_callback(const char *name, int name_len, (void)name_len; (void)instruction; (void)instruction_len; - if (num_prompts == 1) { + if(num_prompts == 1) { responses[0].text = strdup(password); responses[0].length = strlen(password); } @@ -77,16 +77,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -97,9 +98,9 @@ int main(int argc, char *argv[]) password = argv[3]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -111,7 +112,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -121,7 +122,7 @@ int main(int argc, char *argv[]) * banners, exchange keys, and setup crypto, compression, and MAC layers */ session = libssh2_session_init(); - if (libssh2_session_handshake(session, sock)) { + if(libssh2_session_handshake(session, sock)) { fprintf(stderr, "Failure establishing SSH session\n"); return -1; } @@ -141,64 +142,71 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if 
(strstr(userauthlist, "password") != NULL) { + if(strstr(userauthlist, "password") != NULL) { auth_pw |= 1; } - if (strstr(userauthlist, "keyboard-interactive") != NULL) { + if(strstr(userauthlist, "keyboard-interactive") != NULL) { auth_pw |= 2; } - if (strstr(userauthlist, "publickey") != NULL) { + if(strstr(userauthlist, "publickey") != NULL) { auth_pw |= 4; } /* if we got an 4. argument we set this option if supported */ if(argc > 4) { - if ((auth_pw & 1) && !strcasecmp(argv[4], "-p")) { + if((auth_pw & 1) && !strcasecmp(argv[4], "-p")) { auth_pw = 1; } - if ((auth_pw & 2) && !strcasecmp(argv[4], "-i")) { + if((auth_pw & 2) && !strcasecmp(argv[4], "-i")) { auth_pw = 2; } - if ((auth_pw & 4) && !strcasecmp(argv[4], "-k")) { + if((auth_pw & 4) && !strcasecmp(argv[4], "-k")) { auth_pw = 4; } } - if (auth_pw & 1) { + if(auth_pw & 1) { /* We could authenticate via password */ - if (libssh2_userauth_password(session, username, password)) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "\tAuthentication by password failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by password succeeded.\n"); } - } else if (auth_pw & 2) { + } + else if(auth_pw & 2) { /* Or via keyboard-interactive */ - if (libssh2_userauth_keyboard_interactive(session, username, - &kbd_callback) ) { + if(libssh2_userauth_keyboard_interactive(session, username, + &kbd_callback) ) { fprintf(stderr, - "\tAuthentication by keyboard-interactive failed!\n"); + "\tAuthentication by keyboard-interactive failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, - "\tAuthentication by keyboard-interactive succeeded.\n"); + "\tAuthentication by keyboard-interactive succeeded.\n"); } - } else if (auth_pw & 4) { + } + else if(auth_pw & 4) { /* Or by public key */ - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, - keyfile2, password)) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, + keyfile2, 
password)) { fprintf(stderr, "\tAuthentication by public key failed!\n"); goto shutdown; - } else { + } + else { fprintf(stderr, "\tAuthentication by public key succeeded.\n"); } - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } /* Request a shell */ - if (!(channel = libssh2_channel_open_session(session))) { + channel = libssh2_channel_open_session(session); + if(!channel) { fprintf(stderr, "Unable to open a session\n"); goto shutdown; } @@ -211,13 +219,13 @@ int main(int argc, char *argv[]) /* Request a terminal with 'vanilla' terminal emulation * See /etc/termcap for more options */ - if (libssh2_channel_request_pty(channel, "vanilla")) { + if(libssh2_channel_request_pty(channel, "vanilla")) { fprintf(stderr, "Failed requesting pty\n"); goto skip_shell; } /* Open a SHELL on that pty */ - if (libssh2_channel_shell(channel)) { + if(libssh2_channel_shell(channel)) { fprintf(stderr, "Unable to request shell on allocated pty\n"); goto shutdown; } @@ -236,7 +244,7 @@ int main(int argc, char *argv[]) */ skip_shell: - if (channel) { + if(channel) { libssh2_channel_free(channel); channel = NULL; } diff --git a/vendor/libssh2/example/ssh2_agent.c b/vendor/libssh2/example/ssh2_agent.c index 33a2998a5c..1cc508442c 100644 --- a/vendor/libssh2/example/ssh2_agent.c +++ b/vendor/libssh2/example/ssh2_agent.c @@ -36,7 +36,7 @@ #include #include -const char *username="username"; +const char *username = "username"; int main(int argc, char *argv[]) { @@ -54,16 +54,17 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) { + if(argc > 1) { hostaddr = inet_addr(argv[1]); - } else { + } + else { hostaddr = htonl(0x7F000001); } @@ -71,9 +72,9 @@ int main(int argc, char *argv[]) username = argv[2]; } - rc = 
libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -81,7 +82,7 @@ int main(int argc, char *argv[]) * responsible for creating the socket establishing the connection */ sock = socket(AF_INET, SOCK_STREAM, 0); - if (sock == -1) { + if(sock == -1) { fprintf(stderr, "failed to create socket!\n"); rc = 1; goto shutdown; @@ -90,7 +91,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); goto shutdown; @@ -100,7 +101,7 @@ int main(int argc, char *argv[]) * banners, exchange keys, and setup crypto, compression, and MAC layers */ session = libssh2_session_init(); - if (libssh2_session_handshake(session, sock)) { + if(libssh2_session_handshake(session, sock)) { fprintf(stderr, "Failure establishing SSH session\n"); return 1; } @@ -120,43 +121,44 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "publickey") == NULL) { + if(strstr(userauthlist, "publickey") == NULL) { fprintf(stderr, "\"publickey\" authentication is not supported\n"); goto shutdown; } /* Connect to the ssh-agent */ agent = libssh2_agent_init(session); - if (!agent) { + if(!agent) { fprintf(stderr, "Failure initializing ssh-agent support\n"); rc = 1; goto shutdown; } - if (libssh2_agent_connect(agent)) { + if(libssh2_agent_connect(agent)) { fprintf(stderr, "Failure connecting to ssh-agent\n"); rc = 1; goto shutdown; } - if (libssh2_agent_list_identities(agent)) { + if(libssh2_agent_list_identities(agent)) { 
fprintf(stderr, "Failure requesting identities to ssh-agent\n"); rc = 1; goto shutdown; } - while (1) { + while(1) { rc = libssh2_agent_get_identity(agent, &identity, prev_identity); - if (rc == 1) + if(rc == 1) break; - if (rc < 0) { + if(rc < 0) { fprintf(stderr, "Failure obtaining identity from ssh-agent support\n"); rc = 1; goto shutdown; } - if (libssh2_agent_userauth(agent, username, identity)) { + if(libssh2_agent_userauth(agent, username, identity)) { fprintf(stderr, "\tAuthentication with username %s and " "public key %s failed!\n", username, identity->comment); - } else { + } + else { fprintf(stderr, "\tAuthentication with username %s and " "public key %s succeeded!\n", username, identity->comment); @@ -164,7 +166,7 @@ int main(int argc, char *argv[]) } prev_identity = identity; } - if (rc) { + if(rc) { fprintf(stderr, "Couldn't continue authentication\n"); goto shutdown; } @@ -172,7 +174,8 @@ int main(int argc, char *argv[]) /* We're authenticated now. */ /* Request a shell */ - if (!(channel = libssh2_channel_open_session(session))) { + channel = libssh2_channel_open_session(session); + if(!channel) { fprintf(stderr, "Unable to open a session\n"); goto shutdown; } @@ -185,13 +188,13 @@ int main(int argc, char *argv[]) /* Request a terminal with 'vanilla' terminal emulation * See /etc/termcap for more options */ - if (libssh2_channel_request_pty(channel, "vanilla")) { + if(libssh2_channel_request_pty(channel, "vanilla")) { fprintf(stderr, "Failed requesting pty\n"); goto skip_shell; } /* Open a SHELL on that pty */ - if (libssh2_channel_shell(channel)) { + if(libssh2_channel_shell(channel)) { fprintf(stderr, "Unable to request shell on allocated pty\n"); goto shutdown; } @@ -210,7 +213,7 @@ int main(int argc, char *argv[]) */ skip_shell: - if (channel) { + if(channel) { libssh2_channel_free(channel); channel = NULL; } @@ -223,8 +226,10 @@ int main(int argc, char *argv[]) shutdown: - libssh2_agent_disconnect(agent); - libssh2_agent_free(agent); + 
if(agent) { + libssh2_agent_disconnect(agent); + libssh2_agent_free(agent); + } if(session) { libssh2_session_disconnect(session, @@ -232,7 +237,7 @@ int main(int argc, char *argv[]) libssh2_session_free(session); } - if (sock != -1) { + if(sock != -1) { #ifdef WIN32 closesocket(sock); #else diff --git a/vendor/libssh2/example/ssh2_agent_forwarding.c b/vendor/libssh2/example/ssh2_agent_forwarding.c new file mode 100644 index 0000000000..b99fc95fe7 --- /dev/null +++ b/vendor/libssh2/example/ssh2_agent_forwarding.c @@ -0,0 +1,292 @@ +/* + * Sample showing how to use libssh2 to request agent forwarding + * on the remote host. The command executed will run with agent forwarded + * so you should be able to do things like clone out protected git + * repos and such. + * + * The example uses agent authentication to ensure an agent to forward + * is running. + * + * Run it like this: + * + * $ ./ssh2_agent_forwarding 127.0.0.1 user "uptime" + * + */ + +#include "libssh2_config.h" +#include + +#ifdef HAVE_WINSOCK2_H +# include +#endif +#ifdef HAVE_SYS_SOCKET_H +# include +#endif +#ifdef HAVE_NETINET_IN_H +# include +#endif +#ifdef HAVE_SYS_SELECT_H +# include +#endif +# ifdef HAVE_UNISTD_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +# include +#endif + +#ifdef HAVE_SYS_TIME_H +# include +#endif +#include +#include +#include +#include +#include +#include + +static int waitsocket(int socket_fd, LIBSSH2_SESSION *session) +{ + struct timeval timeout; + int rc; + fd_set fd; + fd_set *writefd = NULL; + fd_set *readfd = NULL; + int dir; + + timeout.tv_sec = 10; + timeout.tv_usec = 0; + + FD_ZERO(&fd); + + FD_SET(socket_fd, &fd); + + /* now make sure we wait in the correct direction */ + dir = libssh2_session_block_directions(session); + + if(dir & LIBSSH2_SESSION_BLOCK_INBOUND) + readfd = &fd; + + if(dir & LIBSSH2_SESSION_BLOCK_OUTBOUND) + writefd = &fd; + + rc = select(socket_fd + 1, readfd, writefd, NULL, &timeout); + + return rc; +} + +int main(int argc, char *argv[]) +{ + const 
char *hostname = "127.0.0.1"; + const char *commandline = "uptime"; + const char *username = NULL; + unsigned long hostaddr; + int sock; + struct sockaddr_in sin; + LIBSSH2_SESSION *session; + LIBSSH2_CHANNEL *channel; + LIBSSH2_AGENT *agent = NULL; + struct libssh2_agent_publickey *identity, *prev_identity = NULL; + int rc; + int exitcode; + char *exitsignal = (char *)"none"; + int bytecount = 0; + +#ifdef WIN32 + WSADATA wsadata; + WSAStartup(MAKEWORD(2, 0), &wsadata); +#endif + if(argc < 2) { + fprintf(stderr, "At least IP and username arguments are required.\n"); + return 1; + } + /* must be ip address only */ + hostname = argv[1]; + username = argv[2]; + + if(argc > 3) { + commandline = argv[3]; + } + + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); + return 1; + } + + hostaddr = inet_addr(hostname); + + /* Ultra basic "connect to port 22 on localhost" + * Your code is responsible for creating the socket establishing the + * connection + */ + sock = socket(AF_INET, SOCK_STREAM, 0); + + sin.sin_family = AF_INET; + sin.sin_port = htons(22); + sin.sin_addr.s_addr = hostaddr; + if(connect(sock, (struct sockaddr*)(&sin), + sizeof(struct sockaddr_in)) != 0) { + fprintf(stderr, "failed to connect!\n"); + return -1; + } + + /* Create a session instance */ + session = libssh2_session_init(); + if(!session) + return -1; + + if(libssh2_session_handshake(session, sock) != 0) { + fprintf(stderr, "Failure establishing SSH session: %d\n", rc); + return -1; + } + + /* Connect to the ssh-agent */ + agent = libssh2_agent_init(session); + if(!agent) { + fprintf(stderr, "Failure initializing ssh-agent support\n"); + rc = 1; + goto shutdown; + } + if(libssh2_agent_connect(agent)) { + fprintf(stderr, "Failure connecting to ssh-agent\n"); + rc = 1; + goto shutdown; + } + if(libssh2_agent_list_identities(agent)) { + fprintf(stderr, "Failure requesting identities to ssh-agent\n"); + rc = 1; + goto shutdown; + } + while(1) { + rc = 
libssh2_agent_get_identity(agent, &identity, prev_identity); + if(rc == 1) + break; + if(rc < 0) { + fprintf(stderr, + "Failure obtaining identity from ssh-agent support\n"); + rc = 1; + goto shutdown; + } + if(libssh2_agent_userauth(agent, username, identity)) { + fprintf(stderr, "\tAuthentication with username %s and " + "public key %s failed!\n", + username, identity->comment); + } + else { + fprintf(stderr, "\tAuthentication with username %s and " + "public key %s succeeded!\n", + username, identity->comment); + break; + } + prev_identity = identity; + } + if(rc) { + fprintf(stderr, "Couldn't continue authentication\n"); + goto shutdown; + } + +#if 0 + libssh2_trace(session, ~0); +#endif + + /* Set session to non-blocking */ + libssh2_session_set_blocking(session, 0); + + /* Exec non-blocking on the remove host */ + while((channel = libssh2_channel_open_session(session)) == NULL && + libssh2_session_last_error(session, NULL, NULL, 0) == + LIBSSH2_ERROR_EAGAIN) { + waitsocket(sock, session); + } + if(channel == NULL) { + fprintf(stderr, "Error\n"); + exit(1); + } + while((rc = libssh2_channel_request_auth_agent(channel)) == + LIBSSH2_ERROR_EAGAIN) { + waitsocket(sock, session); + } + if(rc != 0) { + fprintf(stderr, "Error, couldn't request auth agent, error code %d.\n", + rc); + exit(1); + } + else { + fprintf(stdout, "\tAgent forwarding request succeeded!\n"); + } + while((rc = libssh2_channel_exec(channel, commandline)) == + LIBSSH2_ERROR_EAGAIN) { + waitsocket(sock, session); + } + if(rc != 0) { + fprintf(stderr, "Error\n"); + exit(1); + } + for(;;) { + /* loop until we block */ + int rc; + do { + char buffer[0x4000]; + rc = libssh2_channel_read(channel, buffer, sizeof(buffer) ); + if(rc > 0) { + int i; + bytecount += rc; + fprintf(stderr, "We read:\n"); + for(i = 0; i < rc; ++i) + fputc(buffer[i], stderr); + fprintf(stderr, "\n"); + } + else { + if(rc != LIBSSH2_ERROR_EAGAIN) + /* no need to output this for the EAGAIN case */ + fprintf(stderr, 
"libssh2_channel_read returned %d\n", rc); + } + } + while(rc > 0); + + /* this is due to blocking that would occur otherwise so we loop on + this condition */ + if(rc == LIBSSH2_ERROR_EAGAIN) { + waitsocket(sock, session); + } + else + break; + } + exitcode = 127; + while((rc = libssh2_channel_close(channel)) == LIBSSH2_ERROR_EAGAIN) { + waitsocket(sock, session); + } + if(rc == 0) { + exitcode = libssh2_channel_get_exit_status(channel); + libssh2_channel_get_exit_signal(channel, &exitsignal, + NULL, NULL, NULL, NULL, NULL); + } + + if(exitsignal) { + printf("\nGot signal: %s\n", exitsignal); + } + else { + printf("\nEXIT: %d bytecount: %d\n", exitcode, bytecount); + } + + libssh2_channel_free(channel); + channel = NULL; + +shutdown: + + libssh2_session_disconnect(session, + "Normal Shutdown, Thank you for playing"); + libssh2_session_free(session); + +#ifdef WIN32 + closesocket(sock); +#else + close(sock); +#endif + fprintf(stderr, "all done\n"); + + libssh2_exit(); + + return 0; +} diff --git a/vendor/libssh2/example/ssh2_echo.c b/vendor/libssh2/example/ssh2_echo.c index 782930d288..eae4165634 100644 --- a/vendor/libssh2/example/ssh2_echo.c +++ b/vendor/libssh2/example/ssh2_echo.c @@ -87,7 +87,7 @@ int main(int argc, char *argv[]) LIBSSH2_CHANNEL *channel; int rc; int exitcode = 0; - char *exitsignal=(char *)"none"; + char *exitsignal = (char *)"none"; size_t len; LIBSSH2_KNOWNHOSTS *nh; int type; @@ -96,27 +96,27 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) + if(argc > 1) /* must be ip address only */ hostname = argv[1]; - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed 
(%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -131,7 +131,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -139,7 +139,7 @@ int main(int argc, char *argv[]) /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) return -1; /* tell libssh2 we want it all done non-blocking */ @@ -148,9 +148,9 @@ int main(int argc, char *argv[]) /* ... start it up. This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) == + while((rc = libssh2_session_handshake(session, sock)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -193,11 +193,11 @@ int main(int argc, char *argv[]) } libssh2_knownhost_free(nh); - if ( strlen(password) != 0 ) { + if(strlen(password) != 0) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == - LIBSSH2_ERROR_EAGAIN); - if (rc) { + while((rc = libssh2_userauth_password(session, username, password)) == + LIBSSH2_ERROR_EAGAIN); + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); exit(1); } @@ -206,22 +206,22 @@ int main(int argc, char *argv[]) libssh2_trace(session, LIBSSH2_TRACE_SOCKET); /* Exec non-blocking on the remove host */ - while( (channel = libssh2_channel_open_session(session)) == NULL && - libssh2_session_last_error(session,NULL,NULL,0) == - LIBSSH2_ERROR_EAGAIN ) { + while((channel = libssh2_channel_open_session(session)) == NULL && + libssh2_session_last_error(session, NULL, NULL, 0) == + LIBSSH2_ERROR_EAGAIN) { 
waitsocket(sock, session); } - if( channel == NULL ) { - fprintf(stderr,"Error\n"); - exit( 1 ); + if(channel == NULL) { + fprintf(stderr, "Error\n"); + exit(1); } - while( (rc = libssh2_channel_exec(channel, commandline)) == - LIBSSH2_ERROR_EAGAIN ) + while((rc = libssh2_channel_exec(channel, commandline)) == + LIBSSH2_ERROR_EAGAIN) waitsocket(sock, session); - if( rc != 0 ) { + if(rc != 0) { fprintf(stderr, "exec error\n"); - exit( 1 ); + exit(1); } else { LIBSSH2_POLLFD *fds = NULL; @@ -236,10 +236,11 @@ int main(int argc, char *argv[]) int rewrites = 0; int i; - for (i = 0; i < BUFSIZE; i++) + for(i = 0; i < BUFSIZE; i++) buffer[i] = 'A'; - if ((fds = malloc (sizeof (LIBSSH2_POLLFD))) == NULL) { + fds = malloc(sizeof (LIBSSH2_POLLFD)); + if(!fds) { fprintf(stderr, "malloc failed\n"); exit(1); } @@ -252,18 +253,18 @@ int main(int argc, char *argv[]) int rc = (libssh2_poll(fds, 1, 10)); int act = 0; - if (rc < 1) + if(rc < 1) continue; - if (fds[0].revents & LIBSSH2_POLLFD_POLLIN) { + if(fds[0].revents & LIBSSH2_POLLFD_POLLIN) { int n = libssh2_channel_read(channel, buffer, sizeof(buffer)); act++; - if (n == LIBSSH2_ERROR_EAGAIN) { + if(n == LIBSSH2_ERROR_EAGAIN) { rereads++; fprintf(stderr, "will read again\n"); } - else if (n < 0) { + else if(n < 0) { fprintf(stderr, "read failed\n"); exit(1); } @@ -274,20 +275,20 @@ int main(int argc, char *argv[]) } } - if (fds[0].revents & LIBSSH2_POLLFD_POLLOUT) { + if(fds[0].revents & LIBSSH2_POLLFD_POLLOUT) { act++; - if (totwritten < totsize) { + if(totwritten < totsize) { /* we have not written all data yet */ int left = totsize - totwritten; int size = (left < bufsize) ? 
left : bufsize; int n = libssh2_channel_write_ex(channel, 0, buffer, size); - if (n == LIBSSH2_ERROR_EAGAIN) { + if(n == LIBSSH2_ERROR_EAGAIN) { rewrites++; fprintf(stderr, "will write again\n"); } - else if (n < 0) { + else if(n < 0) { fprintf(stderr, "write failed\n"); exit(1); } @@ -295,20 +296,21 @@ int main(int argc, char *argv[]) totwritten += n; fprintf(stderr, "wrote %d bytes (%d in total)", n, totwritten); - if (left >= bufsize && n != bufsize) { + if(left >= bufsize && n != bufsize) { partials++; fprintf(stderr, " PARTIAL"); } fprintf(stderr, "\n"); } - } else { + } + else { /* all data written, send EOF */ rc = libssh2_channel_send_eof(channel); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { fprintf(stderr, "will send eof again\n"); } - else if (rc < 0) { + else if(rc < 0) { fprintf(stderr, "send eof failed\n"); exit(1); } @@ -320,23 +322,23 @@ int main(int argc, char *argv[]) } } - if (fds[0].revents & LIBSSH2_POLLFD_CHANNEL_CLOSED) { - if (!act) /* don't leave loop until we have read all data */ + if(fds[0].revents & LIBSSH2_POLLFD_CHANNEL_CLOSED) { + if(!act) /* don't leave loop until we have read all data */ running = 0; } } while(running); exitcode = 127; - while( (rc = libssh2_channel_close(channel)) == LIBSSH2_ERROR_EAGAIN ) + while((rc = libssh2_channel_close(channel)) == LIBSSH2_ERROR_EAGAIN) waitsocket(sock, session); - if( rc == 0 ) { - exitcode = libssh2_channel_get_exit_status( channel ); + if(rc == 0) { + exitcode = libssh2_channel_get_exit_status(channel); libssh2_channel_get_exit_signal(channel, &exitsignal, NULL, NULL, NULL, NULL, NULL); } - if (exitsignal) + if(exitsignal) fprintf(stderr, "\nGot signal: %s\n", exitsignal); libssh2_channel_free(channel); @@ -345,7 +347,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "\nrereads: %d rewrites: %d totwritten %d\n", rereads, rewrites, totwritten); - if (totwritten != totread) { + if(totwritten != totread) { fprintf(stderr, "\n*** FAIL bytes written: %d bytes " 
"read: %d ***\n", totwritten, totread); exit(1); diff --git a/vendor/libssh2/example/ssh2_exec.c b/vendor/libssh2/example/ssh2_exec.c index c83f0bc4b7..d33c6d9a8d 100644 --- a/vendor/libssh2/example/ssh2_exec.c +++ b/vendor/libssh2/example/ssh2_exec.c @@ -87,7 +87,7 @@ int main(int argc, char *argv[]) LIBSSH2_CHANNEL *channel; int rc; int exitcode; - char *exitsignal=(char *)"none"; + char *exitsignal = (char *)"none"; int bytecount = 0; size_t len; LIBSSH2_KNOWNHOSTS *nh; @@ -97,30 +97,30 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } #endif - if (argc > 1) + if(argc > 1) /* must be ip address only */ hostname = argv[1]; - if (argc > 2) { + if(argc > 2) { username = argv[2]; } - if (argc > 3) { + if(argc > 3) { password = argv[3]; } - if (argc > 4) { + if(argc > 4) { commandline = argv[4]; } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } @@ -135,7 +135,7 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -143,7 +143,7 @@ int main(int argc, char *argv[]) /* Create a session instance */ session = libssh2_session_init(); - if (!session) + if(!session) return -1; /* tell libssh2 we want it all done non-blocking */ @@ -152,9 +152,9 @@ int main(int argc, char *argv[]) /* ... start it up. 
This will trade welcome banners, exchange keys, * and setup crypto, compression, and MAC layers */ - while ((rc = libssh2_session_handshake(session, sock)) == + while((rc = libssh2_session_handshake(session, sock)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Failure establishing SSH session: %d\n", rc); return -1; } @@ -206,104 +206,95 @@ int main(int argc, char *argv[]) } libssh2_knownhost_free(nh); - if ( strlen(password) != 0 ) { + if(strlen(password) != 0) { /* We could authenticate via password */ - while ((rc = libssh2_userauth_password(session, username, password)) == + while((rc = libssh2_userauth_password(session, username, password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } } else { /* Or by public key */ - while ((rc = libssh2_userauth_publickey_fromfile(session, username, + while((rc = libssh2_userauth_publickey_fromfile(session, username, "/home/user/" ".ssh/id_rsa.pub", "/home/user/" ".ssh/id_rsa", password)) == LIBSSH2_ERROR_EAGAIN); - if (rc) { + if(rc) { fprintf(stderr, "\tAuthentication by public key failed\n"); goto shutdown; } } #if 0 - libssh2_trace(session, ~0 ); + libssh2_trace(session, ~0); #endif /* Exec non-blocking on the remove host */ - while( (channel = libssh2_channel_open_session(session)) == NULL && - libssh2_session_last_error(session,NULL,NULL,0) == - LIBSSH2_ERROR_EAGAIN ) - { + while((channel = libssh2_channel_open_session(session)) == NULL && + libssh2_session_last_error(session, NULL, NULL, 0) == + LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); } - if( channel == NULL ) - { - fprintf(stderr,"Error\n"); - exit( 1 ); + if(channel == NULL) { + fprintf(stderr, "Error\n"); + exit(1); } - while( (rc = libssh2_channel_exec(channel, commandline)) == - LIBSSH2_ERROR_EAGAIN ) - { + while((rc = libssh2_channel_exec(channel, commandline)) == + LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); } - if( rc != 0 ) - { - 
fprintf(stderr,"Error\n"); - exit( 1 ); + if(rc != 0) { + fprintf(stderr, "Error\n"); + exit(1); } - for( ;; ) - { + for(;;) { /* loop until we block */ int rc; - do - { + do { char buffer[0x4000]; - rc = libssh2_channel_read( channel, buffer, sizeof(buffer) ); - if( rc > 0 ) - { + rc = libssh2_channel_read(channel, buffer, sizeof(buffer) ); + if(rc > 0) { int i; bytecount += rc; fprintf(stderr, "We read:\n"); - for( i=0; i < rc; ++i ) - fputc( buffer[i], stderr); + for(i = 0; i < rc; ++i) + fputc(buffer[i], stderr); fprintf(stderr, "\n"); } else { - if( rc != LIBSSH2_ERROR_EAGAIN ) + if(rc != LIBSSH2_ERROR_EAGAIN) /* no need to output this for the EAGAIN case */ fprintf(stderr, "libssh2_channel_read returned %d\n", rc); } } - while( rc > 0 ); + while(rc > 0); /* this is due to blocking that would occur otherwise so we loop on this condition */ - if( rc == LIBSSH2_ERROR_EAGAIN ) - { + if(rc == LIBSSH2_ERROR_EAGAIN) { waitsocket(sock, session); } else break; } exitcode = 127; - while( (rc = libssh2_channel_close(channel)) == LIBSSH2_ERROR_EAGAIN ) + while((rc = libssh2_channel_close(channel)) == LIBSSH2_ERROR_EAGAIN) waitsocket(sock, session); - if( rc == 0 ) - { - exitcode = libssh2_channel_get_exit_status( channel ); + if(rc == 0) { + exitcode = libssh2_channel_get_exit_status(channel); libssh2_channel_get_exit_signal(channel, &exitsignal, NULL, NULL, NULL, NULL, NULL); } - if (exitsignal) + if(exitsignal) fprintf(stderr, "\nGot signal: %s\n", exitsignal); - else + else fprintf(stderr, "\nEXIT: %d bytecount: %d\n", exitcode, bytecount); libssh2_channel_free(channel); diff --git a/vendor/libssh2/example/subsystem_netconf.c b/vendor/libssh2/example/subsystem_netconf.c index 82c4941783..cef25fee31 100644 --- a/vendor/libssh2/example/subsystem_netconf.c +++ b/vendor/libssh2/example/subsystem_netconf.c @@ -57,12 +57,12 @@ static int netconf_write(LIBSSH2_CHANNEL *channel, const char *buf, size_t len) do { i = libssh2_channel_write(channel, buf, len); - if (i < 0) { + 
if(i < 0) { fprintf(stderr, "libssh2_channel_write: %d\n", i); return -1; } wr += i; - } while (i > 0 && wr < (ssize_t)len); + } while(i > 0 && wr < (ssize_t)len); return 0; } @@ -78,9 +78,9 @@ static int netconf_read_until(LIBSSH2_CHANNEL *channel, const char *endtag, do { len = libssh2_channel_read(channel, buf + rd, buflen - rd); - if (LIBSSH2_ERROR_EAGAIN == len) + if(LIBSSH2_ERROR_EAGAIN == len) continue; - else if (len < 0) { + else if(len < 0) { fprintf(stderr, "libssh2_channel_read: %d\n", (int)len); return -1; } @@ -92,13 +92,14 @@ static int netconf_read_until(LIBSSH2_CHANNEL *channel, const char *endtag, /* really, this MUST be replaced with proper XML parsing! */ endreply = strstr(buf, endtag); - if (endreply) + if(endreply) specialsequence = strstr(endreply, "]]>]]>"); - } while (!specialsequence && rd < buflen); + } while(!specialsequence && rd < buflen); - if (!specialsequence) { - fprintf(stderr, "%s: ]]>]]> not found! read buffer too small?\n", __func__); + if(!specialsequence) { + fprintf(stderr, "%s: ]]>]]> not found! 
read buffer too small?\n", + __func__); return -1; } @@ -125,8 +126,8 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } @@ -134,40 +135,41 @@ int main(int argc, char *argv[]) int sock = -1; #endif - if (argc > 1) + if(argc > 1) server_ip = argv[1]; - if (argc > 2) + if(argc > 2) username = argv[2]; - if (argc > 3) + if(argc > 3) password = argv[3]; - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } /* Connect to SSH server */ sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); #ifdef WIN32 - if (sock == INVALID_SOCKET) { + if(sock == INVALID_SOCKET) { fprintf(stderr, "failed to open socket!\n"); return -1; } #else - if (sock == -1) { + if(sock == -1) { perror("socket"); return -1; } #endif sin.sin_family = AF_INET; - if (INADDR_NONE == (sin.sin_addr.s_addr = inet_addr(server_ip))) { + sin.sin_addr.s_addr = inet_addr(server_ip); + if(INADDR_NONE == sin.sin_addr.s_addr) { fprintf(stderr, "inet_addr: Invalid IP address \"%s\"\n", server_ip); return -1; } sin.sin_port = htons(830); - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "Failed to connect to %s!\n", inet_ntoa(sin.sin_addr)); return -1; @@ -203,39 +205,41 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password")) + if(strstr(userauthlist, "password")) auth |= AUTH_PASSWORD; - if (strstr(userauthlist, "publickey")) + if(strstr(userauthlist, 
"publickey")) auth |= AUTH_PUBLICKEY; /* check for options */ if(argc > 4) { - if ((auth & AUTH_PASSWORD) && !strcasecmp(argv[4], "-p")) + if((auth & AUTH_PASSWORD) && !strcasecmp(argv[4], "-p")) auth = AUTH_PASSWORD; - if ((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[4], "-k")) + if((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[4], "-k")) auth = AUTH_PUBLICKEY; } - if (auth & AUTH_PASSWORD) { - if (libssh2_userauth_password(session, username, password)) { + if(auth & AUTH_PASSWORD) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else if (auth & AUTH_PUBLICKEY) { - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, + } + else if(auth & AUTH_PUBLICKEY) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, keyfile2, password)) { fprintf(stderr, "Authentication by public key failed!\n"); goto shutdown; } fprintf(stderr, "Authentication by public key succeeded.\n"); - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } /* open a channel */ channel = libssh2_channel_open_session(session); - if (!channel) { + if(!channel) { fprintf(stderr, "Could not open the channel!\n" "(Note that this can be a problem at the server!" " Please review the server logs.)\n"); @@ -243,7 +247,7 @@ int main(int argc, char *argv[]) } /* execute the subsystem on our channel */ - if (libssh2_channel_subsystem(channel, "netconf")) { + if(libssh2_channel_subsystem(channel, "netconf")) { fprintf(stderr, "Could not execute the \"netconf\" subsystem!\n" "(Note that this can be a problem at the server!" 
" Please review the server logs.)\n"); @@ -261,15 +265,16 @@ int main(int argc, char *argv[]) "" "\n" "]]>]]>\n%n", (int *)&len); - if (-1 == netconf_write(channel, buf, len)) + if(-1 == netconf_write(channel, buf, len)) goto shutdown; fprintf(stderr, "Reading NETCONF server \n"); len = netconf_read_until(channel, "", buf, sizeof(buf)); - if (-1 == len) + if(-1 == len) goto shutdown; - fprintf(stderr, "Got %d bytes:\n----------------------\n%s", (int)len, buf); + fprintf(stderr, "Got %d bytes:\n----------------------\n%s", + (int)len, buf); fprintf(stderr, "Sending NETCONF \n"); snprintf(buf, sizeof(buf), @@ -278,18 +283,19 @@ int main(int argc, char *argv[]) "" "\n" "]]>]]>\n%n", (int *)&len); - if (-1 == netconf_write(channel, buf, len)) + if(-1 == netconf_write(channel, buf, len)) goto shutdown; fprintf(stderr, "Reading NETCONF \n"); len = netconf_read_until(channel, "", buf, sizeof(buf)); - if (-1 == len) + if(-1 == len) goto shutdown; - fprintf(stderr, "Got %d bytes:\n----------------------\n%s", (int)len, buf); + fprintf(stderr, "Got %d bytes:\n----------------------\n%s", + (int)len, buf); shutdown: - if (channel) + if(channel) libssh2_channel_free(channel); libssh2_session_disconnect(session, "Client disconnecting normally"); libssh2_session_free(session); diff --git a/vendor/libssh2/example/tcpip-forward.c b/vendor/libssh2/example/tcpip-forward.c index 23513689a1..51ca17ab15 100644 --- a/vendor/libssh2/example/tcpip-forward.c +++ b/vendor/libssh2/example/tcpip-forward.c @@ -70,8 +70,8 @@ int main(int argc, char *argv[]) WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return 1; } @@ -79,48 +79,49 @@ int main(int argc, char *argv[]) int sock = -1, forwardsock = -1; #endif - if (argc > 1) + if(argc > 1) server_ip = argv[1]; - if (argc > 2) + if(argc > 2) username = argv[2]; - if (argc > 3) + 
if(argc > 3) password = argv[3]; - if (argc > 4) + if(argc > 4) remote_listenhost = argv[4]; - if (argc > 5) + if(argc > 5) remote_wantport = atoi(argv[5]); - if (argc > 6) + if(argc > 6) local_destip = argv[6]; - if (argc > 7) + if(argc > 7) local_destport = atoi(argv[7]); - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } /* Connect to SSH server */ sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); #ifdef WIN32 - if (sock == INVALID_SOCKET) { + if(sock == INVALID_SOCKET) { fprintf(stderr, "failed to open socket!\n"); return -1; } #else - if (sock == -1) { + if(sock == -1) { perror("socket"); return -1; } #endif sin.sin_family = AF_INET; - if (INADDR_NONE == (sin.sin_addr.s_addr = inet_addr(server_ip))) { + sin.sin_addr.s_addr = inet_addr(server_ip); + if(INADDR_NONE == sin.sin_addr.s_addr) { perror("inet_addr"); return -1; } sin.sin_port = htons(22); - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return -1; @@ -156,32 +157,34 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); fprintf(stderr, "Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password")) + if(strstr(userauthlist, "password")) auth |= AUTH_PASSWORD; - if (strstr(userauthlist, "publickey")) + if(strstr(userauthlist, "publickey")) auth |= AUTH_PUBLICKEY; /* check for options */ if(argc > 8) { - if ((auth & AUTH_PASSWORD) && !strcasecmp(argv[8], "-p")) + if((auth & AUTH_PASSWORD) && !strcasecmp(argv[8], "-p")) auth = AUTH_PASSWORD; - if ((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[8], "-k")) + if((auth & AUTH_PUBLICKEY) && !strcasecmp(argv[8], "-k")) auth = AUTH_PUBLICKEY; } - if 
(auth & AUTH_PASSWORD) { - if (libssh2_userauth_password(session, username, password)) { + if(auth & AUTH_PASSWORD) { + if(libssh2_userauth_password(session, username, password)) { fprintf(stderr, "Authentication by password failed.\n"); goto shutdown; } - } else if (auth & AUTH_PUBLICKEY) { - if (libssh2_userauth_publickey_fromfile(session, username, keyfile1, - keyfile2, password)) { + } + else if(auth & AUTH_PUBLICKEY) { + if(libssh2_userauth_publickey_fromfile(session, username, keyfile1, + keyfile2, password)) { fprintf(stderr, "\tAuthentication by public key failed!\n"); goto shutdown; } fprintf(stderr, "\tAuthentication by public key succeeded.\n"); - } else { + } + else { fprintf(stderr, "No supported authentication methods found!\n"); goto shutdown; } @@ -191,7 +194,7 @@ int main(int argc, char *argv[]) listener = libssh2_channel_forward_listen_ex(session, remote_listenhost, remote_wantport, &remote_listenport, 1); - if (!listener) { + if(!listener) { fprintf(stderr, "Could not start the tcpip-forward listener!\n" "(Note that this can be a problem at the server!" " Please review the server logs.)\n"); @@ -203,7 +206,7 @@ int main(int argc, char *argv[]) fprintf(stderr, "Waiting for remote connection\n"); channel = libssh2_channel_forward_accept(listener); - if (!channel) { + if(!channel) { fprintf(stderr, "Could not accept connection!\n" "(Note that this can be a problem at the server!" 
" Please review the server logs.)\n"); @@ -215,12 +218,12 @@ int main(int argc, char *argv[]) local_destip, local_destport); forwardsock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); #ifdef WIN32 - if (forwardsock == INVALID_SOCKET) { + if(forwardsock == INVALID_SOCKET) { fprintf(stderr, "failed to open forward socket!\n"); goto shutdown; } #else - if (forwardsock == -1) { + if(forwardsock == -1) { perror("socket"); goto shutdown; } @@ -228,11 +231,12 @@ int main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(local_destport); - if (INADDR_NONE == (sin.sin_addr.s_addr = inet_addr(local_destip))) { + sin.sin_addr.s_addr = inet_addr(local_destip); + if(INADDR_NONE == sin.sin_addr.s_addr) { perror("inet_addr"); goto shutdown; } - if (-1 == connect(forwardsock, (struct sockaddr *)&sin, sinlen)) { + if(-1 == connect(forwardsock, (struct sockaddr *)&sin, sinlen)) { perror("connect"); goto shutdown; } @@ -243,22 +247,23 @@ int main(int argc, char *argv[]) /* Must use non-blocking IO hereafter due to the current libssh2 API */ libssh2_session_set_blocking(session, 0); - while (1) { + while(1) { FD_ZERO(&fds); FD_SET(forwardsock, &fds); tv.tv_sec = 0; tv.tv_usec = 100000; rc = select(forwardsock + 1, &fds, NULL, NULL, &tv); - if (-1 == rc) { + if(-1 == rc) { perror("select"); goto shutdown; } - if (rc && FD_ISSET(forwardsock, &fds)) { + if(rc && FD_ISSET(forwardsock, &fds)) { len = recv(forwardsock, buf, sizeof(buf), 0); - if (len < 0) { + if(len < 0) { perror("read"); goto shutdown; - } else if (0 == len) { + } + else if(0 == len) { fprintf(stderr, "The local server at %s:%d disconnected!\n", local_destip, local_destport); goto shutdown; @@ -266,31 +271,31 @@ int main(int argc, char *argv[]) wr = 0; do { i = libssh2_channel_write(channel, buf, len); - if (i < 0) { + if(i < 0) { fprintf(stderr, "libssh2_channel_write: %d\n", i); goto shutdown; } wr += i; } while(i > 0 && wr < len); } - while (1) { + while(1) { len = libssh2_channel_read(channel, buf, 
sizeof(buf)); - if (LIBSSH2_ERROR_EAGAIN == len) + if(LIBSSH2_ERROR_EAGAIN == len) break; - else if (len < 0) { + else if(len < 0) { fprintf(stderr, "libssh2_channel_read: %d", (int)len); goto shutdown; } wr = 0; - while (wr < len) { + while(wr < len) { i = send(forwardsock, buf + wr, len - wr, 0); - if (i <= 0) { + if(i <= 0) { perror("write"); goto shutdown; } wr += i; } - if (libssh2_channel_eof(channel)) { + if(libssh2_channel_eof(channel)) { fprintf(stderr, "The remote client at %s:%d disconnected!\n", remote_listenhost, remote_listenport); goto shutdown; @@ -304,9 +309,9 @@ int main(int argc, char *argv[]) #else close(forwardsock); #endif - if (channel) + if(channel) libssh2_channel_free(channel); - if (listener) + if(listener) libssh2_channel_forward_cancel(listener); libssh2_session_disconnect(session, "Client disconnecting normally"); libssh2_session_free(session); diff --git a/vendor/libssh2/example/x11.c b/vendor/libssh2/example/x11.c index dd01b3bcc8..c49b64ea9d 100644 --- a/vendor/libssh2/example/x11.c +++ b/vendor/libssh2/example/x11.c @@ -48,14 +48,14 @@ static void remove_node(struct chan_X11_list *elem) current_node = gp_x11_chan; - if (gp_x11_chan == elem) { + if(gp_x11_chan == elem) { gp_x11_chan = gp_x11_chan->next; free(current_node); return; } - while (current_node->next != NULL) { - if (current_node->next == elem) { + while(current_node->next != NULL) { + if(current_node->next == elem) { current_node->next = current_node->next->next; current_node = current_node->next; free(current_node); @@ -78,7 +78,7 @@ static int _raw_mode(void) struct termios tio; rc = tcgetattr(fileno(stdin), &tio); - if (rc != -1) { + if(rc != -1) { _saved_tio = tio; /* do the equivalent of cfmakeraw() manually, to build on Solaris */ tio.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP|INLCR|IGNCR|ICRNL|IXON); @@ -106,37 +106,40 @@ static int _normal_mode(void) static void x11_callback(LIBSSH2_SESSION *session, LIBSSH2_CHANNEL *channel, char *shost, int sport, void **abstract) 
{ - const char * display = NULL; - char * ptr = NULL; - char * temp_buff = NULL; + const char *display = NULL; + char *ptr = NULL; + char *temp_buff = NULL; int display_port = 0; int sock = 0; int rc = 0; struct sockaddr_un addr; struct chan_X11_list *new; struct chan_X11_list *chan_iter; - + (void)session; + (void)shost; + (void)sport; + (void)abstract; /* * Connect to the display * Inspired by x11_connect_display in openssh */ display = getenv("DISPLAY"); - if ( display != NULL) { - if (strncmp( display, "unix:", 5) == 0 || + if(display != NULL) { + if(strncmp(display, "unix:", 5) == 0 || display[0] == ':') { /* Connect to the local unix domain */ ptr = strrchr(display, ':'); - temp_buff = (char *) calloc(strlen(ptr+1), sizeof(char)); - if (!temp_buff) { + temp_buff = (char *) calloc(strlen(ptr + 1), sizeof(char)); + if(!temp_buff) { perror("calloc"); return; } - memcpy(temp_buff, ptr+1, strlen(ptr+1)); + memcpy(temp_buff, ptr + 1, strlen(ptr + 1)); display_port = atoi(temp_buff); free(temp_buff); sock = socket(AF_UNIX, SOCK_STREAM, 0); - if (sock < 0) + if(sock < 0) return; memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; @@ -144,9 +147,9 @@ static void x11_callback(LIBSSH2_SESSION *session, LIBSSH2_CHANNEL *channel, _PATH_UNIX_X, display_port); rc = connect(sock, (struct sockaddr *) &addr, sizeof(addr)); - if (rc != -1){ + if(rc != -1) { /* Connection Successfull */ - if (gp_x11_chan == NULL) { + if(gp_x11_chan == NULL) { /* Calloc ensure that gp_X11_chan is full of 0 */ gp_x11_chan = (struct chan_X11_list *) calloc(1, sizeof(struct chan_X11_list)); @@ -156,7 +159,7 @@ static void x11_callback(LIBSSH2_SESSION *session, LIBSSH2_CHANNEL *channel, } else { chan_iter = gp_x11_chan; - while (chan_iter->next != NULL) + while(chan_iter->next != NULL) chan_iter = chan_iter->next; /* Create the new Node */ new = (struct chan_X11_list *) @@ -180,10 +183,10 @@ static void x11_callback(LIBSSH2_SESSION *session, LIBSSH2_CHANNEL *channel, */ static int 
x11_send_receive(LIBSSH2_CHANNEL *channel, int sock) { - char * buf = NULL; - int bufsize = 8192; - int rc = 0; - int nfds = 1; + char *buf = NULL; + int bufsize = 8192; + int rc = 0; + int nfds = 1; LIBSSH2_POLLFD *fds = NULL; fd_set set; struct timeval timeval_out; @@ -192,12 +195,14 @@ static int x11_send_receive(LIBSSH2_CHANNEL *channel, int sock) FD_ZERO(&set); - FD_SET(sock,&set); + FD_SET(sock, &set); - if ((buf = calloc (bufsize, sizeof(char))) == NULL) + buf = calloc(bufsize, sizeof(char)); + if(!buf) return 0; - if ((fds = malloc (sizeof (LIBSSH2_POLLFD))) == NULL) { + fds = malloc(sizeof (LIBSSH2_POLLFD)); + if(!fds) { free(buf); return 0; } @@ -208,18 +213,18 @@ static int x11_send_receive(LIBSSH2_CHANNEL *channel, int sock) fds[0].revents = LIBSSH2_POLLFD_POLLIN; rc = libssh2_poll(fds, nfds, 0); - if (rc >0) { + if(rc >0) { rc = libssh2_channel_read(channel, buf, bufsize); write(sock, buf, rc); } - rc = select(sock+1, &set, NULL, NULL, &timeval_out); - if (rc > 0) { + rc = select(sock + 1, &set, NULL, NULL, &timeval_out); + if(rc > 0) { memset((void *)buf, 0, bufsize); /* Data in sock*/ rc = read(sock, buf, bufsize); - if (rc > 0) { + if(rc > 0) { libssh2_channel_write(channel, buf, rc); } else { @@ -230,7 +235,7 @@ static int x11_send_receive(LIBSSH2_CHANNEL *channel, int sock) free(fds); free(buf); - if (libssh2_channel_eof(channel) == 1) { + if(libssh2_channel_eof(channel) == 1) { return -1; } return 0; @@ -270,10 +275,10 @@ main (int argc, char *argv[]) timeval_out.tv_usec = 10; - if (argc > 3) { - hostaddr = inet_addr(argv[1]); - username = argv[2]; - password = argv[3]; + if(argc > 3) { + hostaddr = inet_addr(argv[1]); + username = argv[2]; + password = argv[3]; } else { fprintf(stderr, "Usage: %s destination username password", @@ -281,51 +286,55 @@ main (int argc, char *argv[]) return -1; } - if (argc > 4) { + if(argc > 4) { set_debug_on = 1; - fprintf (stderr, "DEBUG is ON: %d\n", set_debug_on); + fprintf(stderr, "DEBUG is ON: %d\n", 
set_debug_on); } - rc = libssh2_init (0); - if (rc != 0) { - fprintf (stderr, "libssh2 initialization failed (%d)\n", rc); + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); return 1; } - sock = socket (AF_INET, SOCK_STREAM, 0); - if (sock == -1) { + sock = socket(AF_INET, SOCK_STREAM, 0); + if(sock == -1) { perror("socket"); return -1; } sin.sin_family = AF_INET; - sin.sin_port = htons (22); + sin.sin_port = htons(22); sin.sin_addr.s_addr = hostaddr; rc = connect(sock, (struct sockaddr *) &sin, sizeof(struct sockaddr_in)); - if (rc != 0) { - fprintf (stderr, "Failed to established connection!\n"); + if(rc != 0) { + fprintf(stderr, "Failed to established connection!\n"); return -1; } /* Open a session */ session = libssh2_session_init(); rc = libssh2_session_handshake(session, sock); - if (rc != 0) { + if(rc != 0) { fprintf(stderr, "Failed Start the SSH session\n"); return -1; } - if (set_debug_on == 1) + if(set_debug_on == 1) libssh2_trace(session, LIBSSH2_TRACE_CONN); + /* ignore pedantic warnings by gcc on the callback argument */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" /* Set X11 Callback */ libssh2_session_callback_set(session, LIBSSH2_CALLBACK_X11, (void *)x11_callback); +#pragma GCC diagnostic pop /* Authenticate via password */ rc = libssh2_userauth_password(session, username, password); - if (rc != 0) { + if(rc != 0) { fprintf(stderr, "Failed to authenticate\n"); session_shutdown(session); close(sock); @@ -334,7 +343,7 @@ main (int argc, char *argv[]) /* Open a channel */ channel = libssh2_channel_open_session(session); - if ( channel == NULL ) { + if(channel == NULL) { fprintf(stderr, "Failed to open a new channel\n"); session_shutdown(session); close(sock); @@ -343,8 +352,8 @@ main (int argc, char *argv[]) /* Request a PTY */ - rc = libssh2_channel_request_pty( channel, "xterm"); - if (rc != 0) { + rc = libssh2_channel_request_pty(channel, "xterm"); + if(rc != 0) { 
fprintf(stderr, "Failed to request a pty\n"); session_shutdown(session); close(sock); @@ -352,8 +361,8 @@ main (int argc, char *argv[]) } /* Request X11 */ - rc = libssh2_channel_x11_req(channel,0); - if(rc!=0) { + rc = libssh2_channel_x11_req(channel, 0); + if(rc != 0) { fprintf(stderr, "Failed to request X11 forwarding\n"); session_shutdown(session); close(sock); @@ -362,7 +371,7 @@ main (int argc, char *argv[]) /* Request a shell */ rc = libssh2_channel_shell(channel); - if (rc!=0) { + if(rc != 0) { fprintf(stderr, "Failed to open a shell\n"); session_shutdown(session); close(sock); @@ -370,7 +379,7 @@ main (int argc, char *argv[]) } rc = _raw_mode(); - if (rc != 0) { + if(rc != 0) { fprintf(stderr, "Failed to entered in raw mode\n"); session_shutdown(session); close(sock); @@ -380,15 +389,15 @@ main (int argc, char *argv[]) memset(&w_size, 0, sizeof(struct winsize)); memset(&w_size_bck, 0, sizeof(struct winsize)); - while (1) { + while(1) { FD_ZERO(&set); - FD_SET(fileno(stdin),&set); + FD_SET(fileno(stdin), &set); /* Search if a resize pty has to be send */ ioctl(fileno(stdin), TIOCGWINSZ, &w_size); - if ((w_size.ws_row != w_size_bck.ws_row) || - (w_size.ws_col != w_size_bck.ws_col)) { + if((w_size.ws_row != w_size_bck.ws_row) || + (w_size.ws_col != w_size_bck.ws_col)) { w_size_bck = w_size; libssh2_channel_request_pty_size(channel, @@ -396,10 +405,12 @@ main (int argc, char *argv[]) w_size.ws_row); } - if ((buf = calloc (bufsiz, sizeof(char))) == NULL) + buf = calloc(bufsiz, sizeof(char)); + if(buf == NULL) break; - if ((fds = malloc (sizeof (LIBSSH2_POLLFD))) == NULL) { + fds = malloc(sizeof (LIBSSH2_POLLFD)); + if(fds == NULL) { free(buf); break; } @@ -410,25 +421,25 @@ main (int argc, char *argv[]) fds[0].revents = LIBSSH2_POLLFD_POLLIN; rc = libssh2_poll(fds, nfds, 0); - if (rc >0) { + if(rc >0) { libssh2_channel_read(channel, buf, sizeof(buf)); fprintf(stdout, "%s", buf); fflush(stdout); } /* Looping on X clients */ - if (gp_x11_chan != NULL) { + 
if(gp_x11_chan != NULL) { current_node = gp_x11_chan; } else current_node = NULL; - while (current_node != NULL) { + while(current_node != NULL) { struct chan_X11_list *next_node; rc = x11_send_receive(current_node->chan, current_node->sock); next_node = current_node->next; - if (rc == -1){ - shutdown(current_node->sock,SHUT_RDWR); + if(rc == -1) { + shutdown(current_node->sock, SHUT_RDWR); close(current_node->sock); remove_node(current_node); } @@ -437,25 +448,25 @@ main (int argc, char *argv[]) } - rc = select(fileno(stdin)+1,&set,NULL,NULL,&timeval_out); - if (rc > 0) { + rc = select(fileno(stdin) + 1, &set, NULL, NULL, &timeval_out); + if(rc > 0) { /* Data in stdin*/ - rc = read(fileno(stdin), buf,1); - if (rc > 0) - libssh2_channel_write(channel,buf, sizeof(buf)); + rc = read(fileno(stdin), buf, 1); + if(rc > 0) + libssh2_channel_write(channel, buf, sizeof(buf)); } - free (fds); - free (buf); + free(fds); + free(buf); - if (libssh2_channel_eof (channel) == 1) { - break; + if(libssh2_channel_eof (channel) == 1) { + break; } } - if (channel) { - libssh2_channel_free (channel); - channel = NULL; + if(channel) { + libssh2_channel_free(channel); + channel = NULL; } _normal_mode(); diff --git a/vendor/libssh2/include/libssh2.h b/vendor/libssh2/include/libssh2.h index 34d2842106..88f4bbcdfb 100644 --- a/vendor/libssh2/include/libssh2.h +++ b/vendor/libssh2/include/libssh2.h @@ -40,18 +40,18 @@ #ifndef LIBSSH2_H #define LIBSSH2_H 1 -#define LIBSSH2_COPYRIGHT "2004-2016 The libssh2 project and its contributors." +#define LIBSSH2_COPYRIGHT "2004-2019 The libssh2 project and its contributors." /* We use underscore instead of dash when appending DEV in dev versions just to make the BANNER define (used by src/session.c) be a valid SSH banner. Release versions have no appended strings and may of course not have dashes either. 
*/ -#define LIBSSH2_VERSION "1.8.0" +#define LIBSSH2_VERSION "1.10.0" /* The numeric version number is also available "in parts" by using these defines: */ #define LIBSSH2_VERSION_MAJOR 1 -#define LIBSSH2_VERSION_MINOR 8 +#define LIBSSH2_VERSION_MINOR 10 #define LIBSSH2_VERSION_PATCH 0 /* This is the numeric version of the libssh2 version number, meant for easier @@ -69,7 +69,7 @@ and it is always a greater number in a more recent release. It makes comparisons with greater than and less than work. */ -#define LIBSSH2_VERSION_NUM 0x010800 +#define LIBSSH2_VERSION_NUM 0x010a00 /* * This is the date and time when the full source package was created. The @@ -80,7 +80,7 @@ * * "Mon Feb 12 11:35:33 UTC 2007" */ -#define LIBSSH2_TIMESTAMP "Tue Oct 25 06:44:33 UTC 2016" +#define LIBSSH2_TIMESTAMP "Sun 29 Aug 2021 08:37:50 PM UTC" #ifndef RC_INVOKED @@ -121,18 +121,28 @@ extern "C" { #if (defined(NETWARE) && !defined(__NOVELL_LIBC__)) # include typedef unsigned char uint8_t; +typedef unsigned short int uint16_t; typedef unsigned int uint32_t; +typedef int int32_t; +typedef unsigned long long uint64_t; +typedef long long int64_t; #endif #ifdef _MSC_VER typedef unsigned char uint8_t; +typedef unsigned short int uint16_t; typedef unsigned int uint32_t; +typedef __int32 int32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; typedef unsigned __int64 libssh2_uint64_t; typedef __int64 libssh2_int64_t; -#ifndef ssize_t +#if (!defined(HAVE_SSIZE_T) && !defined(ssize_t)) typedef SSIZE_T ssize_t; +#define HAVE_SSIZE_T #endif #else +#include typedef unsigned long long libssh2_uint64_t; typedef long long libssh2_int64_t; #endif @@ -203,7 +213,8 @@ typedef off_t libssh2_struct_stat_size; #ifndef LIBSSH2_STRUCT_STAT_SIZE_FORMAT # ifdef __VMS -/* We have to roll our own format here because %z is a C99-ism we don't have. */ +/* We have to roll our own format here because %z is a C99-ism we don't + have. 
*/ # if __USE_OFF64_T || __USING_STD_STAT # define LIBSSH2_STRUCT_STAT_SIZE_FORMAT "%Ld" # else @@ -219,14 +230,16 @@ typedef off_t libssh2_struct_stat_size; /* Part of every banner, user specified or not */ #define LIBSSH2_SSH_BANNER "SSH-2.0-libssh2_" LIBSSH2_VERSION -/* We *could* add a comment here if we so chose */ -#define LIBSSH2_SSH_DEFAULT_BANNER LIBSSH2_SSH_BANNER -#define LIBSSH2_SSH_DEFAULT_BANNER_WITH_CRLF LIBSSH2_SSH_DEFAULT_BANNER "\r\n" +#define LIBSSH2_SSH_DEFAULT_BANNER LIBSSH2_SSH_BANNER +#define LIBSSH2_SSH_DEFAULT_BANNER_WITH_CRLF LIBSSH2_SSH_DEFAULT_BANNER "\r\n" -/* Default generate and safe prime sizes for diffie-hellman-group-exchange-sha1 */ -#define LIBSSH2_DH_GEX_MINGROUP 1024 -#define LIBSSH2_DH_GEX_OPTGROUP 1536 -#define LIBSSH2_DH_GEX_MAXGROUP 2048 +/* Default generate and safe prime sizes for + diffie-hellman-group-exchange-sha1 */ +#define LIBSSH2_DH_GEX_MINGROUP 2048 +#define LIBSSH2_DH_GEX_OPTGROUP 4096 +#define LIBSSH2_DH_GEX_MAXGROUP 8192 + +#define LIBSSH2_DH_MAX_MODULUS_BITS 16384 /* Defaults for pty requests */ #define LIBSSH2_TERM_WIDTH 80 @@ -259,14 +272,14 @@ typedef off_t libssh2_struct_stat_size; typedef struct _LIBSSH2_USERAUTH_KBDINT_PROMPT { - char* text; + char *text; unsigned int length; unsigned char echo; } LIBSSH2_USERAUTH_KBDINT_PROMPT; typedef struct _LIBSSH2_USERAUTH_KBDINT_RESPONSE { - char* text; + char *text; unsigned int length; } LIBSSH2_USERAUTH_KBDINT_RESPONSE; @@ -277,10 +290,10 @@ typedef struct _LIBSSH2_USERAUTH_KBDINT_RESPONSE /* 'keyboard-interactive' authentication callback */ #define LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC(name_) \ - void name_(const char* name, int name_len, const char* instruction, \ + void name_(const char *name, int name_len, const char *instruction, \ int instruction_len, int num_prompts, \ - const LIBSSH2_USERAUTH_KBDINT_PROMPT* prompts, \ - LIBSSH2_USERAUTH_KBDINT_RESPONSE* responses, void **abstract) + const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, \ + 
LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, void **abstract) /* Callbacks for special SSH packets */ #define LIBSSH2_IGNORE_FUNC(name) \ @@ -314,12 +327,14 @@ typedef struct _LIBSSH2_USERAUTH_KBDINT_RESPONSE LIBSSH2_CHANNEL *channel, void **channel_abstract) /* I/O callbacks */ -#define LIBSSH2_RECV_FUNC(name) ssize_t name(libssh2_socket_t socket, \ - void *buffer, size_t length, \ - int flags, void **abstract) -#define LIBSSH2_SEND_FUNC(name) ssize_t name(libssh2_socket_t socket, \ - const void *buffer, size_t length,\ - int flags, void **abstract) +#define LIBSSH2_RECV_FUNC(name) \ + ssize_t name(libssh2_socket_t socket, \ + void *buffer, size_t length, \ + int flags, void **abstract) +#define LIBSSH2_SEND_FUNC(name) \ + ssize_t name(libssh2_socket_t socket, \ + const void *buffer, size_t length, \ + int flags, void **abstract) /* libssh2_session_callback_set() constants */ #define LIBSSH2_CALLBACK_IGNORE 0 @@ -341,6 +356,7 @@ typedef struct _LIBSSH2_USERAUTH_KBDINT_RESPONSE #define LIBSSH2_METHOD_COMP_SC 7 #define LIBSSH2_METHOD_LANG_CS 8 #define LIBSSH2_METHOD_LANG_SC 9 +#define LIBSSH2_METHOD_SIGN_ALGO 10 /* flags */ #define LIBSSH2_FLAG_SIGPIPE 1 @@ -403,11 +419,16 @@ typedef struct _LIBSSH2_POLLFD { /* Hash Types */ #define LIBSSH2_HOSTKEY_HASH_MD5 1 #define LIBSSH2_HOSTKEY_HASH_SHA1 2 +#define LIBSSH2_HOSTKEY_HASH_SHA256 3 /* Hostkey Types */ -#define LIBSSH2_HOSTKEY_TYPE_UNKNOWN 0 -#define LIBSSH2_HOSTKEY_TYPE_RSA 1 -#define LIBSSH2_HOSTKEY_TYPE_DSS 2 +#define LIBSSH2_HOSTKEY_TYPE_UNKNOWN 0 +#define LIBSSH2_HOSTKEY_TYPE_RSA 1 +#define LIBSSH2_HOSTKEY_TYPE_DSS 2 +#define LIBSSH2_HOSTKEY_TYPE_ECDSA_256 3 +#define LIBSSH2_HOSTKEY_TYPE_ECDSA_384 4 +#define LIBSSH2_HOSTKEY_TYPE_ECDSA_521 5 +#define LIBSSH2_HOSTKEY_TYPE_ED25519 6 /* Disconnect Codes (defined by SSH protocol) */ #define SSH_DISCONNECT_HOST_NOT_ALLOWED_TO_CONNECT 1 @@ -453,7 +474,8 @@ typedef struct _LIBSSH2_POLLFD { #define LIBSSH2_ERROR_FILE -16 #define LIBSSH2_ERROR_METHOD_NONE -17 #define 
LIBSSH2_ERROR_AUTHENTICATION_FAILED -18 -#define LIBSSH2_ERROR_PUBLICKEY_UNRECOGNIZED LIBSSH2_ERROR_AUTHENTICATION_FAILED +#define LIBSSH2_ERROR_PUBLICKEY_UNRECOGNIZED \ + LIBSSH2_ERROR_AUTHENTICATION_FAILED #define LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED -19 #define LIBSSH2_ERROR_CHANNEL_OUTOFORDER -20 #define LIBSSH2_ERROR_CHANNEL_FAILURE -21 @@ -482,6 +504,11 @@ typedef struct _LIBSSH2_POLLFD { #define LIBSSH2_ERROR_ENCRYPT -44 #define LIBSSH2_ERROR_BAD_SOCKET -45 #define LIBSSH2_ERROR_KNOWN_HOSTS -46 +#define LIBSSH2_ERROR_CHANNEL_WINDOW_FULL -47 +#define LIBSSH2_ERROR_KEYFILE_AUTH_FAILED -48 +#define LIBSSH2_ERROR_RANDGEN -49 +#define LIBSSH2_ERROR_MISSING_USERAUTH_BANNER -50 +#define LIBSSH2_ERROR_ALGO_UNSUPPORTED -51 /* this is a define to provide the old (<= 1.2.7) name */ #define LIBSSH2_ERROR_BANNER_NONE LIBSSH2_ERROR_BANNER_RECV @@ -524,14 +551,14 @@ LIBSSH2_API void libssh2_free(LIBSSH2_SESSION *session, void *ptr); * * Fills algs with a list of supported acryptographic algorithms. Returns a * non-negative number (number of supported algorithms) on success or a - * negative number (an eror code) on failure. + * negative number (an error code) on failure. 
* * NOTE: on success, algs must be deallocated (by calling libssh2_free) when * not needed anymore */ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, int method_type, - const char*** algs); + const char ***algs); /* Session API */ LIBSSH2_API LIBSSH2_SESSION * @@ -579,7 +606,7 @@ LIBSSH2_API int libssh2_session_last_error(LIBSSH2_SESSION *session, LIBSSH2_API int libssh2_session_last_errno(LIBSSH2_SESSION *session); LIBSSH2_API int libssh2_session_set_last_error(LIBSSH2_SESSION* session, int errcode, - const char* errmsg); + const char *errmsg); LIBSSH2_API int libssh2_session_block_directions(LIBSSH2_SESSION *session); LIBSSH2_API int libssh2_session_flag(LIBSSH2_SESSION *session, int flag, @@ -590,14 +617,18 @@ LIBSSH2_API const char *libssh2_session_banner_get(LIBSSH2_SESSION *session); LIBSSH2_API char *libssh2_userauth_list(LIBSSH2_SESSION *session, const char *username, unsigned int username_len); +LIBSSH2_API int libssh2_userauth_banner(LIBSSH2_SESSION *session, + char **banner); LIBSSH2_API int libssh2_userauth_authenticated(LIBSSH2_SESSION *session); -LIBSSH2_API int libssh2_userauth_password_ex(LIBSSH2_SESSION *session, - const char *username, - unsigned int username_len, - const char *password, - unsigned int password_len, - LIBSSH2_PASSWD_CHANGEREQ_FUNC((*passwd_change_cb))); +LIBSSH2_API int +libssh2_userauth_password_ex(LIBSSH2_SESSION *session, + const char *username, + unsigned int username_len, + const char *password, + unsigned int password_len, + LIBSSH2_PASSWD_CHANGEREQ_FUNC + ((*passwd_change_cb))); #define libssh2_userauth_password(session, username, password) \ libssh2_userauth_password_ex((session), (username), \ @@ -624,7 +655,8 @@ libssh2_userauth_publickey(LIBSSH2_SESSION *session, const char *username, const unsigned char *pubkeydata, size_t pubkeydata_len, - LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC((*sign_callback)), + LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC + ((*sign_callback)), void **abstract); LIBSSH2_API int @@ 
-664,7 +696,7 @@ libssh2_userauth_publickey_frommemory(LIBSSH2_SESSION *session, * response_callback is provided with filled by library prompts array, * but client must allocate and fill individual responses. Responses * array is already allocated. Responses data will be freed by libssh2 - * after callback return, but before subsequent callback invokation. + * after callback return, but before subsequent callback invocation. */ LIBSSH2_API int libssh2_userauth_keyboard_interactive_ex(LIBSSH2_SESSION* session, @@ -694,7 +726,7 @@ LIBSSH2_API int libssh2_poll(LIBSSH2_POLLFD *fds, unsigned int nfds, #define SSH_EXTENDED_DATA_STDERR 1 -/* Returned by any function that would block during a read/write opperation */ +/* Returned by any function that would block during a read/write operation */ #define LIBSSH2CHANNEL_EAGAIN LIBSSH2_ERROR_EAGAIN LIBSSH2_API LIBSSH2_CHANNEL * @@ -716,7 +748,8 @@ libssh2_channel_direct_tcpip_ex(LIBSSH2_SESSION *session, const char *host, LIBSSH2_API LIBSSH2_LISTENER * libssh2_channel_forward_listen_ex(LIBSSH2_SESSION *session, const char *host, - int port, int *bound_port, int queue_maxsize); + int port, int *bound_port, + int queue_maxsize); #define libssh2_channel_forward_listen(session, port) \ libssh2_channel_forward_listen_ex((session), NULL, (port), NULL, 16) @@ -736,6 +769,8 @@ LIBSSH2_API int libssh2_channel_setenv_ex(LIBSSH2_CHANNEL *channel, (unsigned int)strlen(varname), (value), \ (unsigned int)strlen(value)) +LIBSSH2_API int libssh2_channel_request_auth_agent(LIBSSH2_CHANNEL *channel); + LIBSSH2_API int libssh2_channel_request_pty_ex(LIBSSH2_CHANNEL *channel, const char *term, unsigned int term_len, @@ -747,15 +782,17 @@ LIBSSH2_API int libssh2_channel_request_pty_ex(LIBSSH2_CHANNEL *channel, libssh2_channel_request_pty_ex((channel), (term), \ (unsigned int)strlen(term), \ NULL, 0, \ - LIBSSH2_TERM_WIDTH, LIBSSH2_TERM_HEIGHT, \ - LIBSSH2_TERM_WIDTH_PX, LIBSSH2_TERM_HEIGHT_PX) + LIBSSH2_TERM_WIDTH, \ + LIBSSH2_TERM_HEIGHT, \ + 
LIBSSH2_TERM_WIDTH_PX, \ + LIBSSH2_TERM_HEIGHT_PX) LIBSSH2_API int libssh2_channel_request_pty_size_ex(LIBSSH2_CHANNEL *channel, int width, int height, int width_px, int height_px); #define libssh2_channel_request_pty_size(channel, width, height) \ - libssh2_channel_request_pty_size_ex( (channel), (width), (height), 0, 0) + libssh2_channel_request_pty_size_ex((channel), (width), (height), 0, 0) LIBSSH2_API int libssh2_channel_x11_req_ex(LIBSSH2_CHANNEL *channel, int single_connection, @@ -817,8 +854,9 @@ LIBSSH2_API ssize_t libssh2_channel_write_ex(LIBSSH2_CHANNEL *channel, #define libssh2_channel_write(channel, buf, buflen) \ libssh2_channel_write_ex((channel), 0, (buf), (buflen)) -#define libssh2_channel_write_stderr(channel, buf, buflen) \ - libssh2_channel_write_ex((channel), SSH_EXTENDED_DATA_STDERR, (buf), (buflen)) +#define libssh2_channel_write_stderr(channel, buf, buflen) \ + libssh2_channel_write_ex((channel), SSH_EXTENDED_DATA_STDERR, \ + (buf), (buflen)) LIBSSH2_API unsigned long libssh2_channel_window_write_ex(LIBSSH2_CHANNEL *channel, @@ -855,7 +893,7 @@ LIBSSH2_API int libssh2_channel_handle_extended_data2(LIBSSH2_CHANNEL *channel, libssh2_channel_handle_extended_data((channel), \ (ignore) ? 
\ LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE : \ - LIBSSH2_CHANNEL_EXTENDED_DATA_NORMAL ) + LIBSSH2_CHANNEL_EXTENDED_DATA_NORMAL) #define LIBSSH2_CHANNEL_FLUSH_EXTENDED_DATA -1 #define LIBSSH2_CHANNEL_FLUSH_ALL -2 @@ -959,13 +997,17 @@ libssh2_knownhost_init(LIBSSH2_SESSION *session); #define LIBSSH2_KNOWNHOST_KEYENC_RAW (1<<16) #define LIBSSH2_KNOWNHOST_KEYENC_BASE64 (2<<16) -/* type of key (2 bits) */ -#define LIBSSH2_KNOWNHOST_KEY_MASK (7<<18) -#define LIBSSH2_KNOWNHOST_KEY_SHIFT 18 -#define LIBSSH2_KNOWNHOST_KEY_RSA1 (1<<18) -#define LIBSSH2_KNOWNHOST_KEY_SSHRSA (2<<18) -#define LIBSSH2_KNOWNHOST_KEY_SSHDSS (3<<18) -#define LIBSSH2_KNOWNHOST_KEY_UNKNOWN (7<<18) +/* type of key (4 bits) */ +#define LIBSSH2_KNOWNHOST_KEY_MASK (15<<18) +#define LIBSSH2_KNOWNHOST_KEY_SHIFT 18 +#define LIBSSH2_KNOWNHOST_KEY_RSA1 (1<<18) +#define LIBSSH2_KNOWNHOST_KEY_SSHRSA (2<<18) +#define LIBSSH2_KNOWNHOST_KEY_SSHDSS (3<<18) +#define LIBSSH2_KNOWNHOST_KEY_ECDSA_256 (4<<18) +#define LIBSSH2_KNOWNHOST_KEY_ECDSA_384 (5<<18) +#define LIBSSH2_KNOWNHOST_KEY_ECDSA_521 (6<<18) +#define LIBSSH2_KNOWNHOST_KEY_ED25519 (7<<18) +#define LIBSSH2_KNOWNHOST_KEY_UNKNOWN (15<<18) LIBSSH2_API int libssh2_knownhost_add(LIBSSH2_KNOWNHOSTS *hosts, @@ -1133,7 +1175,7 @@ libssh2_knownhost_writefile(LIBSSH2_KNOWNHOSTS *hosts, * libssh2_knownhost_get() * * Traverse the internal list of known hosts. Pass NULL to 'prev' to get - * the first one. Or pass a poiner to the previously returned one to get the + * the first one. Or pass a pointer to the previously returned one to get the * next. * * Returns: @@ -1189,7 +1231,7 @@ libssh2_agent_list_identities(LIBSSH2_AGENT *agent); * libssh2_agent_get_identity() * * Traverse the internal list of public keys. Pass NULL to 'prev' to get - * the first one. Or pass a poiner to the previously returned one to get the + * the first one. Or pass a pointer to the previously returned one to get the * next. 
* * Returns: @@ -1233,6 +1275,24 @@ libssh2_agent_disconnect(LIBSSH2_AGENT *agent); LIBSSH2_API void libssh2_agent_free(LIBSSH2_AGENT *agent); +/* + * libssh2_agent_set_identity_path() + * + * Allows a custom agent identity socket path beyond SSH_AUTH_SOCK env + * + */ +LIBSSH2_API void +libssh2_agent_set_identity_path(LIBSSH2_AGENT *agent, + const char *path); + +/* + * libssh2_agent_get_identity_path() + * + * Returns the custom agent identity socket path if set + * + */ +LIBSSH2_API const char * +libssh2_agent_get_identity_path(LIBSSH2_AGENT *agent); /* * libssh2_keepalive_config() @@ -1247,9 +1307,9 @@ libssh2_agent_free(LIBSSH2_AGENT *agent); * Note that non-blocking applications are responsible for sending the * keepalive messages using libssh2_keepalive_send(). */ -LIBSSH2_API void libssh2_keepalive_config (LIBSSH2_SESSION *session, - int want_reply, - unsigned interval); +LIBSSH2_API void libssh2_keepalive_config(LIBSSH2_SESSION *session, + int want_reply, + unsigned interval); /* * libssh2_keepalive_send() @@ -1259,8 +1319,8 @@ LIBSSH2_API void libssh2_keepalive_config (LIBSSH2_SESSION *session, * it again. Returns 0 on success, or LIBSSH2_ERROR_SOCKET_SEND on * I/O errors. 
*/ -LIBSSH2_API int libssh2_keepalive_send (LIBSSH2_SESSION *session, - int *seconds_to_next); +LIBSSH2_API int libssh2_keepalive_send(LIBSSH2_SESSION *session, + int *seconds_to_next); /* NOTE NOTE NOTE libssh2_trace() has no function in builds that aren't built with debug @@ -1278,11 +1338,11 @@ LIBSSH2_API int libssh2_trace(LIBSSH2_SESSION *session, int bitmask); #define LIBSSH2_TRACE_SOCKET (1<<9) typedef void (*libssh2_trace_handler_func)(LIBSSH2_SESSION*, - void*, + void *, const char *, size_t); LIBSSH2_API int libssh2_trace_sethandler(LIBSSH2_SESSION *session, - void* context, + void *context, libssh2_trace_handler_func callback); #ifdef __cplusplus diff --git a/vendor/libssh2/include/libssh2_publickey.h b/vendor/libssh2/include/libssh2_publickey.h index 0979e23cb1..5dbdcf9253 100644 --- a/vendor/libssh2/include/libssh2_publickey.h +++ b/vendor/libssh2/include/libssh2_publickey.h @@ -81,16 +81,18 @@ extern "C" { #endif /* Publickey Subsystem */ -LIBSSH2_API LIBSSH2_PUBLICKEY *libssh2_publickey_init(LIBSSH2_SESSION *session); - -LIBSSH2_API int libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, - const unsigned char *name, - unsigned long name_len, - const unsigned char *blob, - unsigned long blob_len, char overwrite, - unsigned long num_attrs, - const libssh2_publickey_attribute attrs[]); -#define libssh2_publickey_add(pkey, name, blob, blob_len, overwrite, \ +LIBSSH2_API LIBSSH2_PUBLICKEY * +libssh2_publickey_init(LIBSSH2_SESSION *session); + +LIBSSH2_API int +libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, + const unsigned char *name, + unsigned long name_len, + const unsigned char *blob, + unsigned long blob_len, char overwrite, + unsigned long num_attrs, + const libssh2_publickey_attribute attrs[]); +#define libssh2_publickey_add(pkey, name, blob, blob_len, overwrite, \ num_attrs, attrs) \ libssh2_publickey_add_ex((pkey), (name), strlen(name), (blob), (blob_len), \ (overwrite), (num_attrs), (attrs)) @@ -107,8 +109,9 @@ LIBSSH2_API int 
libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY *pkey, unsigned long *num_keys, libssh2_publickey_list **pkey_list); -LIBSSH2_API void libssh2_publickey_list_free(LIBSSH2_PUBLICKEY *pkey, - libssh2_publickey_list *pkey_list); +LIBSSH2_API void +libssh2_publickey_list_free(LIBSSH2_PUBLICKEY *pkey, + libssh2_publickey_list *pkey_list); LIBSSH2_API int libssh2_publickey_shutdown(LIBSSH2_PUBLICKEY *pkey); diff --git a/vendor/libssh2/include/libssh2_sftp.h b/vendor/libssh2/include/libssh2_sftp.h index 677faf2fd9..476ea87046 100644 --- a/vendor/libssh2/include/libssh2_sftp.h +++ b/vendor/libssh2/include/libssh2_sftp.h @@ -79,6 +79,9 @@ typedef struct _LIBSSH2_SFTP_STATVFS LIBSSH2_SFTP_STATVFS; #define LIBSSH2_SFTP_READLINK 1 #define LIBSSH2_SFTP_REALPATH 2 +/* Flags for sftp_mkdir() */ +#define LIBSSH2_SFTP_DEFAULT_MODE -1 + /* SFTP attribute flag bits */ #define LIBSSH2_SFTP_ATTR_SIZE 0x00000001 #define LIBSSH2_SFTP_ATTR_UIDGID 0x00000002 @@ -186,32 +189,32 @@ struct _LIBSSH2_SFTP_STATVFS { #define LIBSSH2_FXF_EXCL 0x00000020 /* SFTP Status Codes (returned by libssh2_sftp_last_error() ) */ -#define LIBSSH2_FX_OK 0 -#define LIBSSH2_FX_EOF 1 -#define LIBSSH2_FX_NO_SUCH_FILE 2 -#define LIBSSH2_FX_PERMISSION_DENIED 3 -#define LIBSSH2_FX_FAILURE 4 -#define LIBSSH2_FX_BAD_MESSAGE 5 -#define LIBSSH2_FX_NO_CONNECTION 6 -#define LIBSSH2_FX_CONNECTION_LOST 7 -#define LIBSSH2_FX_OP_UNSUPPORTED 8 -#define LIBSSH2_FX_INVALID_HANDLE 9 -#define LIBSSH2_FX_NO_SUCH_PATH 10 -#define LIBSSH2_FX_FILE_ALREADY_EXISTS 11 -#define LIBSSH2_FX_WRITE_PROTECT 12 -#define LIBSSH2_FX_NO_MEDIA 13 -#define LIBSSH2_FX_NO_SPACE_ON_FILESYSTEM 14 -#define LIBSSH2_FX_QUOTA_EXCEEDED 15 -#define LIBSSH2_FX_UNKNOWN_PRINCIPLE 16 /* Initial mis-spelling */ -#define LIBSSH2_FX_UNKNOWN_PRINCIPAL 16 -#define LIBSSH2_FX_LOCK_CONFlICT 17 /* Initial mis-spelling */ -#define LIBSSH2_FX_LOCK_CONFLICT 17 -#define LIBSSH2_FX_DIR_NOT_EMPTY 18 -#define LIBSSH2_FX_NOT_A_DIRECTORY 19 -#define LIBSSH2_FX_INVALID_FILENAME 20 
-#define LIBSSH2_FX_LINK_LOOP 21 - -/* Returned by any function that would block during a read/write opperation */ +#define LIBSSH2_FX_OK 0UL +#define LIBSSH2_FX_EOF 1UL +#define LIBSSH2_FX_NO_SUCH_FILE 2UL +#define LIBSSH2_FX_PERMISSION_DENIED 3UL +#define LIBSSH2_FX_FAILURE 4UL +#define LIBSSH2_FX_BAD_MESSAGE 5UL +#define LIBSSH2_FX_NO_CONNECTION 6UL +#define LIBSSH2_FX_CONNECTION_LOST 7UL +#define LIBSSH2_FX_OP_UNSUPPORTED 8UL +#define LIBSSH2_FX_INVALID_HANDLE 9UL +#define LIBSSH2_FX_NO_SUCH_PATH 10UL +#define LIBSSH2_FX_FILE_ALREADY_EXISTS 11UL +#define LIBSSH2_FX_WRITE_PROTECT 12UL +#define LIBSSH2_FX_NO_MEDIA 13UL +#define LIBSSH2_FX_NO_SPACE_ON_FILESYSTEM 14UL +#define LIBSSH2_FX_QUOTA_EXCEEDED 15UL +#define LIBSSH2_FX_UNKNOWN_PRINCIPLE 16UL /* Initial mis-spelling */ +#define LIBSSH2_FX_UNKNOWN_PRINCIPAL 16UL +#define LIBSSH2_FX_LOCK_CONFlICT 17UL /* Initial mis-spelling */ +#define LIBSSH2_FX_LOCK_CONFLICT 17UL +#define LIBSSH2_FX_DIR_NOT_EMPTY 18UL +#define LIBSSH2_FX_NOT_A_DIRECTORY 19UL +#define LIBSSH2_FX_INVALID_FILENAME 20UL +#define LIBSSH2_FX_LINK_LOOP 21UL + +/* Returned by any function that would block during a read/write operation */ #define LIBSSH2SFTP_EAGAIN LIBSSH2_ERROR_EAGAIN /* SFTP API */ @@ -221,12 +224,13 @@ LIBSSH2_API unsigned long libssh2_sftp_last_error(LIBSSH2_SFTP *sftp); LIBSSH2_API LIBSSH2_CHANNEL *libssh2_sftp_get_channel(LIBSSH2_SFTP *sftp); /* File / Directory Ops */ -LIBSSH2_API LIBSSH2_SFTP_HANDLE *libssh2_sftp_open_ex(LIBSSH2_SFTP *sftp, - const char *filename, - unsigned int filename_len, - unsigned long flags, - long mode, int open_type); -#define libssh2_sftp_open(sftp, filename, flags, mode) \ +LIBSSH2_API LIBSSH2_SFTP_HANDLE * +libssh2_sftp_open_ex(LIBSSH2_SFTP *sftp, + const char *filename, + unsigned int filename_len, + unsigned long flags, + long mode, int open_type); +#define libssh2_sftp_open(sftp, filename, flags, mode) \ libssh2_sftp_open_ex((sftp), (filename), strlen(filename), (flags), \ (mode), 
LIBSSH2_SFTP_OPENFILE) #define libssh2_sftp_opendir(sftp, path) \ @@ -328,7 +332,8 @@ LIBSSH2_API int libssh2_sftp_symlink_ex(LIBSSH2_SFTP *sftp, const char *path, unsigned int path_len, char *target, - unsigned int target_len, int link_type); + unsigned int target_len, + int link_type); #define libssh2_sftp_symlink(sftp, orig, linkpath) \ libssh2_sftp_symlink_ex((sftp), (orig), strlen(orig), (linkpath), \ strlen(linkpath), LIBSSH2_SFTP_SYMLINK) diff --git a/vendor/libssh2/install-sh b/vendor/libssh2/install-sh index 4d4a9519ea..ec298b5374 100755 --- a/vendor/libssh2/install-sh +++ b/vendor/libssh2/install-sh @@ -1,7 +1,7 @@ #!/bin/sh # install - install a program, script, or datafile -scriptversion=2005-05-14.22 +scriptversion=2020-11-14.01; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the @@ -35,42 +35,62 @@ scriptversion=2005-05-14.22 # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it +# 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. +# from scratch. -# set DOITPROG to echo to test this script +tab=' ' +nl=' +' +IFS=" $tab$nl" -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" +# Set DOITPROG to "echo" to test this script. -# put in absolute paths if you don't have them in your path; or use env. vars. 
+doit=${DOITPROG-} +doit_exec=${doit:-exec} -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" +# Put in absolute file names if you don't have them in your path; +# or use environment vars. -chmodcmd="$chmodprog 0755" -chowncmd= +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +# Create dirs (including intermediate dirs) using mode 755. +# This is like GNU 'install' as of coreutils 8.32 (2020). +mkdir_umask=22 + +backupsuffix= chgrpcmd= -stripcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog rmcmd="$rmprog -f" -mvcmd="$mvprog" +stripcmd= + src= dst= dir_arg= -dstarg= -no_target_directory= +dst_arg= + +copy_on_change=false +is_target_a_directory=possibly -usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... @@ -80,108 +100,187 @@ In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --c (ignored) --d create directories instead of installing files. --g GROUP $chgrpprog installed files to GROUP. --m MODE $chmodprog installed files to MODE. --o USER $chownprog installed files to USER. --s $stripprog installed files. --t DIRECTORY install into DIRECTORY. --T report an error if DSTFILE is a directory. ---help display this help and exit. ---version display version info and exit. + --help display this help and exit. + --version display version info and exit. 
+ + -c (ignored) + -C install only if different (preserve data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -p pass -p to $cpprog. + -s $stripprog installed files. + -S SUFFIX attempt to back up existing files, with suffix SUFFIX. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG + +By default, rm is invoked with -f; when overridden with RMPROG, +it's up to you to specify -f if you want it. + +If -S is not specified, no backups are attempted. + +Email bug reports to bug-automake@gnu.org. +Automake home page: https://www.gnu.org/software/automake/ " -while test -n "$1"; do +while test $# -ne 0; do case $1 in - -c) shift - continue;; + -c) ;; + + -C) copy_on_change=true;; - -d) dir_arg=true - shift - continue;; + -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; + shift;; --help) echo "$usage"; exit $?;; - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; + -m) mode=$2 + case $mode in + *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; -o) chowncmd="$chownprog $2" - shift - shift - continue;; + shift;; + + -p) cpprog="$cpprog -p";; - -s) stripcmd=$stripprog - shift - continue;; + -s) stripcmd=$stripprog;; - -t) dstarg=$2 - shift - shift - continue;; + -S) backupsuffix="$2" + shift;; - -T) no_target_directory=true - shift - continue;; + -t) + is_target_a_directory=always + dst_arg=$2 + # Protect names problematic for 'test' and other utilities. 
+ case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) is_target_a_directory=never;; --version) echo "$0 $scriptversion"; exit $?;; - *) # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - test -n "$dir_arg$dstarg" && break - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dstarg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dstarg" - shift # fnord - fi - shift # arg - dstarg=$arg - done - break;; + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; esac + shift done -if test -z "$1"; then +# We allow the use of options -d and -T together, by making -d +# take the precedence; this is for compatibility with GNU install. + +if test -n "$dir_arg"; then + if test -n "$dst_arg"; then + echo "$0: target directory not allowed when installing a directory." >&2 + exit 1 + fi +fi + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi - # It's OK to call `install-sh -d' without argument. + # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi +if test -z "$dir_arg"; then + if test $# -gt 1 || test "$is_target_a_directory" = always; then + if test ! 
-d "$dst_arg"; then + echo "$0: $dst_arg: Is not a directory." >&2 + exit 1 + fi + fi +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + for src do - # Protect names starting with `-'. + # Protect names problematic for 'test' and other utilities. case $src in - -*) src=./$src ;; + -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src - src= - - if test -d "$dst"; then - mkdircmd=: - chmodcmd= - else - mkdircmd=$mkdirprog + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + # Don't chown directories that already exist. + if test $dstdir_status = 0; then + chowncmd="" fi else + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. @@ -190,82 +289,185 @@ do exit 1 fi - if test -z "$dstarg"; then + if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi + dst=$dst_arg - dst=$dstarg - # Protect names starting with `-'. - case $dst in - -*) dst=./$dst ;; - esac - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. + # If destination is a directory, append the input filename. 
if test -d "$dst"; then - if test -n "$no_target_directory"; then - echo "$0: $dstarg: Is a directory" >&2 - exit 1 + if test "$is_target_a_directory" = never; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 fi - dst=$dst/`basename "$src"` + dstdir=$dst + dstbase=`basename "$src"` + case $dst in + */) dst=$dst$dstbase;; + *) dst=$dst/$dstbase;; + esac + dstdir_status=0 + else + dstdir=`dirname "$dst"` + test -d "$dstdir" + dstdir_status=$? fi fi - # This sed command emulates the dirname command. - dstdir=`echo "$dst" | sed -e 's,/*$,,;s,[^/]*$,,;s,/*$,,;s,^$,.,'` + case $dstdir in + */) dstdirslash=$dstdir;; + *) dstdirslash=$dstdir/;; + esac - # Make sure that the destination directory exists. + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + # The $RANDOM variable is not portable (e.g., dash). Use it + # here however when possible just to lower collision chance. + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + + trap ' + ret=$? + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null + exit $ret + ' 0 + + # Because "mkdir -p" follows existing symlinks and we likely work + # directly in world-writeable /tmp, make sure that the '$tmpdir' + # directory is successfully created first before we actually test + # 'mkdir -p'. + if (umask $mkdir_umask && + $mkdirprog $mkdir_mode "$tmpdir" && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 
+ test_tmpdir="$tmpdir/a" + ls_ld_tmpdir=`ls -ld "$test_tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null + fi + trap '' 0;; + esac - # Skip lots of stat calls in the usual case. - if test ! -d "$dstdir"; then - defaultIFS=' - ' - IFS="${IFS-$defaultIFS}" + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else - oIFS=$IFS - # Some sh's can't handle IFS=/ for some reason. - IFS='%' - set x `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` - shift - IFS=$oIFS + # mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. - pathcomp= + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac - while test $# -ne 0 ; do - pathcomp=$pathcomp$1 + oIFS=$IFS + IFS=/ + set -f + set fnord $dstdir shift - if test ! -d "$pathcomp"; then - $mkdirprog "$pathcomp" - # mkdir can fail with a `File exist' error in case several - # install-sh are creating the directory concurrently. This - # is OK. - test -d "$pathcomp" || exit + set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. 
+ test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true fi - pathcomp=$pathcomp/ - done + fi fi if test -n "$dir_arg"; then - $doit $mkdircmd "$dst" \ - && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \ - && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \ - && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \ - && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; } - + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else - dstfile=`basename "$dst"` # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ + dsttmp=${dstdirslash}_inst.$$_ + rmtmp=${dstdirslash}_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - trap '(exit $?); exit' 1 2 13 15 # Copy the file name to the temp name. - $doit $cpprog "$src" "$dsttmp" && + (umask $cp_umask && + { test -z "$stripcmd" || { + # Create $dsttmp read-write so that cp doesn't create it read-only, + # which would cause strip to fail. + if test -z "$doit"; then + : >"$dsttmp" # No need to fork-exec 'touch'. + else + $doit touch "$dsttmp" + fi + } + } && + $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # @@ -273,51 +475,67 @@ do # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. 
# - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ - && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ - && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ - && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } && - - # Now rename the file to the real destination. - { $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \ - || { - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - if test -f "$dstdir/$dstfile"; then - $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \ - || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ - || { - echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 - (exit 1); exit 1 - } - else - : - fi - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" - } - } - fi || { (exit 1); exit 1; } -done + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + set +f && + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # If $backupsuffix is set, and the file being installed + # already exists, attempt a backup. 
Don't worry if it fails, + # e.g., if mv doesn't support -f. + if test -n "$backupsuffix" && test -f "$dst"; then + $doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null + fi -# The final little trick to "correctly" pass the exit status to the exit trap. -{ - (exit 0); exit 0 -} + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-end: "$" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" # End: diff --git a/vendor/libssh2/ltmain.sh b/vendor/libssh2/ltmain.sh index a736cf9942..21e5e07847 100644 --- a/vendor/libssh2/ltmain.sh +++ b/vendor/libssh2/ltmain.sh @@ -31,7 +31,7 @@ PROGRAM=libtool PACKAGE=libtool -VERSION="2.4.6 Debian-2.4.6-2" +VERSION="2.4.6 Debian-2.4.6-15" package_revision=2.4.6 @@ -387,7 +387,7 @@ EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. 
# putting '$debug_cmd' at the start of all your functions, you can get # bash to show function call trace with: # -# debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +# debug_cmd='echo "${FUNCNAME[0]} $*" >&2' bash your-script-name debug_cmd=${debug_cmd-":"} exit_cmd=: @@ -1370,7 +1370,7 @@ func_lt_ver () #! /bin/sh # Set a version string for this script. -scriptversion=2014-01-07.03; # UTC +scriptversion=2015-10-07.11; # UTC # A portable, pluggable option parser for Bourne shell. # Written by Gary V. Vaughan, 2010 @@ -1530,6 +1530,8 @@ func_run_hooks () { $debug_cmd + _G_rc_run_hooks=false + case " $hookable_fns " in *" $1 "*) ;; *) func_fatal_error "'$1' does not support hook funcions.n" ;; @@ -1538,16 +1540,16 @@ func_run_hooks () eval _G_hook_fns=\$$1_hooks; shift for _G_hook in $_G_hook_fns; do - eval $_G_hook '"$@"' - - # store returned options list back into positional - # parameters for next 'cmd' execution. - eval _G_hook_result=\$${_G_hook}_result - eval set dummy "$_G_hook_result"; shift + if eval $_G_hook '"$@"'; then + # store returned options list back into positional + # parameters for next 'cmd' execution. + eval _G_hook_result=\$${_G_hook}_result + eval set dummy "$_G_hook_result"; shift + _G_rc_run_hooks=: + fi done - func_quote_for_eval ${1+"$@"} - func_run_hooks_result=$func_quote_for_eval_result + $_G_rc_run_hooks && func_run_hooks_result=$_G_hook_result } @@ -1557,10 +1559,16 @@ func_run_hooks () ## --------------- ## # In order to add your own option parsing hooks, you must accept the -# full positional parameter list in your hook function, remove any -# options that you action, and then pass back the remaining unprocessed +# full positional parameter list in your hook function, you may remove/edit +# any options that you action, and then pass back the remaining unprocessed # options in '_result', escaped suitably for -# 'eval'. Like this: +# 'eval'. 
In this case you also must return $EXIT_SUCCESS to let the +# hook's caller know that it should pay attention to +# '_result'. Returning $EXIT_FAILURE signalizes that +# arguments are left untouched by the hook and therefore caller will ignore the +# result variable. +# +# Like this: # # my_options_prep () # { @@ -1570,9 +1578,11 @@ func_run_hooks () # usage_message=$usage_message' # -s, --silent don'\''t print informational messages # ' -# -# func_quote_for_eval ${1+"$@"} -# my_options_prep_result=$func_quote_for_eval_result +# # No change in '$@' (ignored completely by this hook). There is +# # no need to do the equivalent (but slower) action: +# # func_quote_for_eval ${1+"$@"} +# # my_options_prep_result=$func_quote_for_eval_result +# false # } # func_add_hook func_options_prep my_options_prep # @@ -1581,25 +1591,37 @@ func_run_hooks () # { # $debug_cmd # +# args_changed=false +# # # Note that for efficiency, we parse as many options as we can # # recognise in a loop before passing the remainder back to the # # caller on the first unrecognised argument we encounter. # while test $# -gt 0; do # opt=$1; shift # case $opt in -# --silent|-s) opt_silent=: ;; +# --silent|-s) opt_silent=: +# args_changed=: +# ;; # # Separate non-argument short options: # -s*) func_split_short_opt "$_G_opt" # set dummy "$func_split_short_opt_name" \ # "-$func_split_short_opt_arg" ${1+"$@"} # shift +# args_changed=: # ;; -# *) set dummy "$_G_opt" "$*"; shift; break ;; +# *) # Make sure the first unrecognised option "$_G_opt" +# # is added back to "$@", we could need that later +# # if $args_changed is true. 
+# set dummy "$_G_opt" ${1+"$@"}; shift; break ;; # esac # done # -# func_quote_for_eval ${1+"$@"} -# my_silent_option_result=$func_quote_for_eval_result +# if $args_changed; then +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# fi +# +# $args_changed # } # func_add_hook func_parse_options my_silent_option # @@ -1611,16 +1633,32 @@ func_run_hooks () # $opt_silent && $opt_verbose && func_fatal_help "\ # '--silent' and '--verbose' options are mutually exclusive." # -# func_quote_for_eval ${1+"$@"} -# my_option_validation_result=$func_quote_for_eval_result +# false # } # func_add_hook func_validate_options my_option_validation # -# You'll alse need to manually amend $usage_message to reflect the extra +# You'll also need to manually amend $usage_message to reflect the extra # options you parse. It's preferable to append if you can, so that # multiple option parsing hooks can be added safely. +# func_options_finish [ARG]... +# ---------------------------- +# Finishing the option parse loop (call 'func_options' hooks ATM). +func_options_finish () +{ + $debug_cmd + + _G_func_options_finish_exit=false + if func_run_hooks func_options ${1+"$@"}; then + func_options_finish_result=$func_run_hooks_result + _G_func_options_finish_exit=: + fi + + $_G_func_options_finish_exit +} + + # func_options [ARG]... # --------------------- # All the functions called inside func_options are hookable. 
See the @@ -1630,17 +1668,28 @@ func_options () { $debug_cmd - func_options_prep ${1+"$@"} - eval func_parse_options \ - ${func_options_prep_result+"$func_options_prep_result"} - eval func_validate_options \ - ${func_parse_options_result+"$func_parse_options_result"} + _G_rc_options=false - eval func_run_hooks func_options \ - ${func_validate_options_result+"$func_validate_options_result"} + for my_func in options_prep parse_options validate_options options_finish + do + if eval func_$my_func '${1+"$@"}'; then + eval _G_res_var='$'"func_${my_func}_result" + eval set dummy "$_G_res_var" ; shift + _G_rc_options=: + fi + done + + # Save modified positional parameters for caller. As a top-level + # options-parser function we always need to set the 'func_options_result' + # variable (regardless the $_G_rc_options value). + if $_G_rc_options; then + func_options_result=$_G_res_var + else + func_quote_for_eval ${1+"$@"} + func_options_result=$func_quote_for_eval_result + fi - # save modified positional parameters for caller - func_options_result=$func_run_hooks_result + $_G_rc_options } @@ -1649,9 +1698,9 @@ func_options () # All initialisations required before starting the option parse loop. # Note that when calling hook functions, we pass through the list of # positional parameters. If a hook function modifies that list, and -# needs to propogate that back to rest of this script, then the complete +# needs to propagate that back to rest of this script, then the complete # modified list must be put in 'func_run_hooks_result' before -# returning. +# returning $EXIT_SUCCESS (otherwise $EXIT_FAILURE is returned). 
func_hookable func_options_prep func_options_prep () { @@ -1661,10 +1710,14 @@ func_options_prep () opt_verbose=false opt_warning_types= - func_run_hooks func_options_prep ${1+"$@"} + _G_rc_options_prep=false + if func_run_hooks func_options_prep ${1+"$@"}; then + _G_rc_options_prep=: + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result + fi - # save modified positional parameters for caller - func_options_prep_result=$func_run_hooks_result + $_G_rc_options_prep } @@ -1678,18 +1731,20 @@ func_parse_options () func_parse_options_result= + _G_rc_parse_options=false # this just eases exit handling while test $# -gt 0; do # Defer to hook functions for initial option parsing, so they # get priority in the event of reusing an option name. - func_run_hooks func_parse_options ${1+"$@"} - - # Adjust func_parse_options positional parameters to match - eval set dummy "$func_run_hooks_result"; shift + if func_run_hooks func_parse_options ${1+"$@"}; then + eval set dummy "$func_run_hooks_result"; shift + _G_rc_parse_options=: + fi # Break out of the loop if we already parsed every option. 
test $# -gt 0 || break + _G_match_parse_options=: _G_opt=$1 shift case $_G_opt in @@ -1704,7 +1759,10 @@ func_parse_options () ;; --warnings|--warning|-W) - test $# = 0 && func_missing_arg $_G_opt && break + if test $# = 0 && func_missing_arg $_G_opt; then + _G_rc_parse_options=: + break + fi case " $warning_categories $1" in *" $1 "*) # trailing space prevents matching last $1 above @@ -1757,15 +1815,25 @@ func_parse_options () shift ;; - --) break ;; + --) _G_rc_parse_options=: ; break ;; -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; - *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift + _G_match_parse_options=false + break + ;; esac + + $_G_match_parse_options && _G_rc_parse_options=: done - # save modified positional parameters for caller - func_quote_for_eval ${1+"$@"} - func_parse_options_result=$func_quote_for_eval_result + + if $_G_rc_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result + fi + + $_G_rc_parse_options } @@ -1778,16 +1846,21 @@ func_validate_options () { $debug_cmd + _G_rc_validate_options=false + # Display all warnings if -W was not given. test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" - func_run_hooks func_validate_options ${1+"$@"} + if func_run_hooks func_validate_options ${1+"$@"}; then + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result + _G_rc_validate_options=: + fi # Bail if the options were screwed! $exit_cmd $EXIT_FAILURE - # save modified positional parameters for caller - func_validate_options_result=$func_run_hooks_result + $_G_rc_validate_options } @@ -2068,7 +2141,7 @@ include the following information: compiler: $LTCC compiler flags: $LTCFLAGS linker: $LD (gnu? 
$with_gnu_ld) - version: $progname $scriptversion Debian-2.4.6-2 + version: $progname $scriptversion Debian-2.4.6-15 automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` @@ -2270,6 +2343,8 @@ libtool_options_prep () nonopt= preserve_args= + _G_rc_lt_options_prep=: + # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) @@ -2293,11 +2368,18 @@ libtool_options_prep () uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; + *) + _G_rc_lt_options_prep=false + ;; esac - # Pass back the list of options. - func_quote_for_eval ${1+"$@"} - libtool_options_prep_result=$func_quote_for_eval_result + if $_G_rc_lt_options_prep; then + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result + fi + + $_G_rc_lt_options_prep } func_add_hook func_options_prep libtool_options_prep @@ -2309,9 +2391,12 @@ libtool_parse_options () { $debug_cmd + _G_rc_lt_parse_options=false + # Perform our own loop to consume as many options as possible in # each iteration. 
while test $# -gt 0; do + _G_match_lt_parse_options=: _G_opt=$1 shift case $_G_opt in @@ -2386,15 +2471,22 @@ libtool_parse_options () func_append preserve_args " $_G_opt" ;; - # An option not handled by this hook function: - *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"} ; shift + _G_match_lt_parse_options=false + break + ;; esac + $_G_match_lt_parse_options && _G_rc_lt_parse_options=: done + if $_G_rc_lt_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result + fi - # save modified positional parameters for caller - func_quote_for_eval ${1+"$@"} - libtool_parse_options_result=$func_quote_for_eval_result + $_G_rc_lt_parse_options } func_add_hook func_parse_options libtool_parse_options @@ -7275,10 +7367,13 @@ func_mode_link () # -specs=* GCC specs files # -stdlib=* select c++ std lib with clang # -fsanitize=* Clang/GCC memory and address sanitizer + # -fuse-ld=* Linker select flags for GCC + # -static-* direct GCC to link specific libraries statically + # -fcilkplus Cilk Plus language extension features for C/C++ -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ - -specs=*|-fsanitize=*) + -specs=*|-fsanitize=*|-fuse-ld=*|-static-*|-fcilkplus) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result func_append compile_command " $arg" diff --git a/vendor/libssh2/m4/libtool.m4 b/vendor/libssh2/m4/libtool.m4 index ee80844b61..c4c02946de 100644 --- a/vendor/libssh2/m4/libtool.m4 +++ b/vendor/libssh2/m4/libtool.m4 @@ -1041,8 +1041,8 @@ int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD 
- echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD - $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$AR cr libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cr libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD cat > conftest.c << _LT_EOF @@ -1071,11 +1071,11 @@ _LT_EOF # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + 10.0,*86*-darwin8*|10.0,*-darwin[[912]]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[[012]][[,.]]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - 10.*) + 10.*|11.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; @@ -1492,7 +1492,7 @@ need_locks=$enable_libtool_lock m4_defun([_LT_PROG_AR], [AC_CHECK_TOOLS(AR, [ar], false) : ${AR=ar} -: ${AR_FLAGS=cru} +: ${AR_FLAGS=cr} _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) @@ -4063,7 +4063,8 @@ _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. nlist=conftest.nm - if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then + $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&AS_MESSAGE_LOG_FD + if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&AS_MESSAGE_LOG_FD && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" @@ -4703,6 +4704,12 @@ m4_if([$1], [CXX], [ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; + # flang / f18. 
f95 an alias for gfortran or flang on Debian + flang* | f18* | f95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) @@ -6438,7 +6445,7 @@ if test yes != "$_lt_caught_CXX_error"; then # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' else GXX=no @@ -6813,7 +6820,7 @@ if test yes != "$_lt_caught_CXX_error"; then # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then @@ -6878,7 +6885,7 @@ if test yes != "$_lt_caught_CXX_error"; then # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then @@ -7217,7 +7224,7 @@ if test yes != "$_lt_caught_CXX_error"; then # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' else # FIXME: insert proper C++ library support @@ -7301,7 +7308,7 @@ if test yes != "$_lt_caught_CXX_error"; then # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. @@ -7312,7 +7319,7 @@ if test yes != "$_lt_caught_CXX_error"; then # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
- output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' diff --git a/vendor/libssh2/missing b/vendor/libssh2/missing index f62bbae306..8d0eaad250 100755 --- a/vendor/libssh2/missing +++ b/vendor/libssh2/missing @@ -1,9 +1,9 @@ #! /bin/sh # Common wrapper for a few potentially missing GNU programs. -scriptversion=2013-10-28.13; # UTC +scriptversion=2018-03-07.03; # UTC -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2020 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify @@ -17,7 +17,7 @@ scriptversion=2013-10-28.13; # UTC # GNU General Public License for more details. # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -101,9 +101,9 @@ else exit $st fi -perl_URL=http://www.perl.org/ -flex_URL=http://flex.sourceforge.net/ -gnu_software_URL=http://www.gnu.org/software +perl_URL=https://www.perl.org/ +flex_URL=https://github.com/westes/flex +gnu_software_URL=https://www.gnu.org/software program_details () { @@ -207,9 +207,9 @@ give_advice "$1" | sed -e '1s/^/WARNING: /' \ exit $st # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" +# time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: diff --git a/vendor/libssh2/os400/initscript.sh b/vendor/libssh2/os400/initscript.sh index 1d47a1dd8e..a18e24cfeb 100644 --- a/vendor/libssh2/os400/initscript.sh +++ b/vendor/libssh2/os400/initscript.sh @@ -49,7 +49,7 @@ setenv TGTCCSID '500' # Target CCSID of objects. setenv DEBUG '*ALL' # Debug level. setenv OPTIMIZE '10' # Optimisation level setenv OUTPUT '*NONE' # Compilation output option. -setenv TGTRLS 'V5R3M0' # Target OS release. +setenv TGTRLS 'V6R1M0' # Target OS release. setenv IFSDIR '/libssh2' # Installation IFS directory. # Define ZLIB availability and locations. 
@@ -180,7 +180,7 @@ make_module() CMD="CRTCMOD MODULE(${TARGETLIB}/${1}) SRCSTMF('__tmpsrcf.c')" # CMD="${CMD} SYSIFCOPT(*IFS64IO) OPTION(*INCDIRFIRST *SHOWINC *SHOWSYS)" CMD="${CMD} SYSIFCOPT(*IFS64IO) OPTION(*INCDIRFIRST)" - CMD="${CMD} LOCALETYPE(*LOCALE)" + CMD="${CMD} LOCALETYPE(*LOCALE) FLAG(10)" CMD="${CMD} INCDIR('${TOPDIR}/os400/include'" CMD="${CMD} '/QIBM/ProdData/qadrt/include' '${TOPDIR}/include'" CMD="${CMD} '${TOPDIR}/os400' '${SRCDIR}'" diff --git a/vendor/libssh2/src/CMakeLists.txt b/vendor/libssh2/src/CMakeLists.txt index 6401acff24..eee1a80d4a 100644 --- a/vendor/libssh2/src/CMakeLists.txt +++ b/vendor/libssh2/src/CMakeLists.txt @@ -77,21 +77,21 @@ if(CRYPTO_BACKEND STREQUAL "OpenSSL" OR NOT CRYPTO_BACKEND) list(APPEND PC_LIBS -lcrypt32) find_file(DLL_LIBEAY32 - NAMES libeay32.dll crypto.dll + NAMES libeay32.dll crypto.dll libcrypto-1_1.dll libcrypto-1_1-x64.dll HINTS ${_OPENSSL_ROOT_HINTS} PATHS ${_OPENSSL_ROOT_PATHS} PATH_SUFFIXES bin) if (NOT DLL_LIBEAY32) message(WARNING - "Unable to find OpenSSL libeay32 DLL, executables may not run") + "Unable to find OpenSSL crypto (aka libeay32) DLL, executables may not run") endif() find_file(DLL_SSLEAY32 - NAMES ssleay32.dll ssl.dll + NAMES ssleay32.dll ssl.dll libssl-1_1.dll libssl-1_1-x64.dll HINTS ${_OPENSSL_ROOT_HINTS} PATHS ${_OPENSSL_ROOT_PATHS} PATH_SUFFIXES bin) if (NOT DLL_SSLEAY32) message(WARNING - "Unable to find OpenSSL ssleay32 DLL, executables may not run") + "Unable to find OpenSSL ssl (aka ssleay32) DLL, executables may not run") endif() if(DLL_LIBEAY32 AND DLL_SSLEAY32) @@ -176,6 +176,10 @@ include(GNUInstallDirs) set(SOURCES ${CRYPTO_SOURCES} agent.c + agent_win.c + blf.h + bcrypt_pbkdf.c + blowfish.c channel.c channel.h comp.c @@ -217,7 +221,7 @@ set_target_properties(libssh2 PROPERTIES PREFIX "") target_compile_definitions(libssh2 PRIVATE ${PRIVATE_COMPILE_DEFINITIONS}) target_include_directories(libssh2 - PRIVATE ${PRIVATE_INCLUDE_DIRECTORIES} + PRIVATE 
"${PROJECT_SOURCE_DIR}/include/" ${PRIVATE_INCLUDE_DIRECTORIES} PUBLIC $ $/${CMAKE_INSTALL_INCLUDEDIR}>) @@ -312,6 +316,7 @@ if (NOT HAVE_STRTOLL) check_symbol_exists(_strtoi64 stdlib.h HAVE_STRTOI64) endif() check_symbol_exists(snprintf stdio.h HAVE_SNPRINTF) +check_symbol_exists(memset_s string.h HAVE_MEMSET_S) if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" OR ${CMAKE_SYSTEM_NAME} STREQUAL "Interix") @@ -322,7 +327,7 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" OR # filesystem here" # # Mac OS X's poll has funny behaviors, like: - # not being able to do poll on no fildescriptors (10.3?) + # not being able to do poll on no filedescriptors (10.3?) # not being able to poll on some files (like anything in /dev) # not having reliable timeout support # inconsistent return of POLLHUP where other implementations give POLLIN @@ -333,7 +338,7 @@ endif() append_needed_socket_libraries(LIBRARIES) -# Non-blocking socket support tests. Must be after after library tests to +# Non-blocking socket support tests. 
Must be after library tests to # link correctly set(SAVE_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES}) set(CMAKE_REQUIRED_LIBRARIES ${LIBRARIES}) @@ -355,6 +360,11 @@ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") target_compile_definitions(libssh2 PRIVATE LIBSSH2_DARWIN) endif() +if(MSVC) + set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Zi /Od") + set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} /DEBUG") +endif() + if(CMAKE_VERSION VERSION_LESS "2.8.12") # Fall back to over-linking dependencies target_link_libraries(libssh2 ${LIBRARIES}) @@ -388,7 +398,7 @@ set(RUNTIME_DEPENDENCIES ${_RUNTIME_DEPENDENCIES} CACHE INTERNAL ## During package installation, install Libssh2Config.cmake install(EXPORT Libssh2Config NAMESPACE Libssh2:: - DESTINATION lib/cmake/libssh2) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libssh2) ## During build, register directly from build tree # create Libssh2Config.cmake @@ -420,4 +430,4 @@ write_basic_package_version_file( COMPATIBILITY SameMajorVersion) install( FILES ${CMAKE_CURRENT_BINARY_DIR}/Libssh2ConfigVersion.cmake - DESTINATION lib/cmake/libssh2) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libssh2) diff --git a/vendor/libssh2/src/Makefile.am b/vendor/libssh2/src/Makefile.am index c14dc7cb3d..31d58ed573 100644 --- a/vendor/libssh2/src/Makefile.am +++ b/vendor/libssh2/src/Makefile.am @@ -1,7 +1,7 @@ # $Id: Makefile.am,v 1.21 2009/05/07 17:21:56 bagder Exp $ AUTOMAKE_OPTIONS = foreign nostdinc -# Get the CRYPTO_CSOURCES and CRYPTO_HHEADERS defines +# Get the CRYPTO_CSOURCES, CRYPTO_HHEADERS and CRYPTO_LTLIBS defines if OPENSSL include ../Makefile.OpenSSL.inc endif @@ -11,9 +11,6 @@ endif if WINCNG include ../Makefile.WinCNG.inc endif -if OS400QC3 -include ../Makefile.os400qc3.inc -endif if MBEDTLS include ../Makefile.mbedTLS.inc endif @@ -65,4 +62,4 @@ VERSION=-version-info 1:1:0 libssh2_la_LDFLAGS = $(VERSION) -no-undefined \ -export-symbols-regex '^libssh2_.*' \ - $(LTLIBGCRYPT) $(LTLIBSSL) $(LTLIBZ) + 
$(CRYPTO_LTLIBS) $(LTLIBZ) diff --git a/vendor/libssh2/src/Makefile.in b/vendor/libssh2/src/Makefile.in index 9e59967eeb..d76490ab63 100644 --- a/vendor/libssh2/src/Makefile.in +++ b/vendor/libssh2/src/Makefile.in @@ -1,7 +1,7 @@ -# Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.16.4 from Makefile.am. # @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2021 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -100,8 +100,7 @@ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = libssh2_config.h \ - $(top_builddir)/example/libssh2_config.h +CONFIG_HEADER = libssh2_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; @@ -137,12 +136,12 @@ libssh2_la_LIBADD = am__libssh2_la_SOURCES_DIST = channel.c comp.c crypt.c hostkey.c kex.c \ mac.c misc.c packet.c publickey.c scp.c session.c sftp.c \ userauth.c transport.c version.c knownhost.c agent.c \ - libgcrypt.c mbedtls.c openssl.c os400qc3.c wincng.c pem.c \ - keepalive.c global.c libssh2_priv.h libgcrypt.h mbedtls.h \ - openssl.h os400qc3.h wincng.h transport.h channel.h comp.h \ - mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h -@LIBGCRYPT_FALSE@@MBEDTLS_FALSE@@OPENSSL_FALSE@@OS400QC3_FALSE@@WINCNG_TRUE@am__objects_1 = wincng.lo -@LIBGCRYPT_FALSE@@MBEDTLS_FALSE@@OPENSSL_FALSE@@OS400QC3_TRUE@am__objects_1 = os400qc3.lo + libgcrypt.c mbedtls.c openssl.c wincng.c pem.c keepalive.c \ + global.c blowfish.c bcrypt_pbkdf.c agent_win.c libssh2_priv.h \ + libgcrypt.h mbedtls.h openssl.h wincng.h transport.h channel.h \ + comp.h mac.h misc.h packet.h userauth.h session.h sftp.h \ + crypto.h blf.h agent.h 
+@LIBGCRYPT_FALSE@@MBEDTLS_FALSE@@OPENSSL_FALSE@@WINCNG_TRUE@am__objects_1 = wincng.lo @LIBGCRYPT_FALSE@@MBEDTLS_FALSE@@OPENSSL_TRUE@am__objects_1 = \ @LIBGCRYPT_FALSE@@MBEDTLS_FALSE@@OPENSSL_TRUE@ openssl.lo @LIBGCRYPT_FALSE@@MBEDTLS_TRUE@am__objects_1 = mbedtls.lo @@ -150,7 +149,8 @@ am__libssh2_la_SOURCES_DIST = channel.c comp.c crypt.c hostkey.c kex.c \ am__objects_2 = channel.lo comp.lo crypt.lo hostkey.lo kex.lo mac.lo \ misc.lo packet.lo publickey.lo scp.lo session.lo sftp.lo \ userauth.lo transport.lo version.lo knownhost.lo agent.lo \ - $(am__objects_1) pem.lo keepalive.lo global.lo + $(am__objects_1) pem.lo keepalive.lo global.lo blowfish.lo \ + bcrypt_pbkdf.lo agent_win.lo am__objects_3 = am__objects_4 = $(am__objects_3) am_libssh2_la_OBJECTS = $(am__objects_2) $(am__objects_4) @@ -176,7 +176,21 @@ am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/agent.Plo ./$(DEPDIR)/agent_win.Plo \ + ./$(DEPDIR)/bcrypt_pbkdf.Plo ./$(DEPDIR)/blowfish.Plo \ + ./$(DEPDIR)/channel.Plo ./$(DEPDIR)/comp.Plo \ + ./$(DEPDIR)/crypt.Plo ./$(DEPDIR)/global.Plo \ + ./$(DEPDIR)/hostkey.Plo ./$(DEPDIR)/keepalive.Plo \ + ./$(DEPDIR)/kex.Plo ./$(DEPDIR)/knownhost.Plo \ + ./$(DEPDIR)/libgcrypt.Plo ./$(DEPDIR)/mac.Plo \ + ./$(DEPDIR)/mbedtls.Plo ./$(DEPDIR)/misc.Plo \ + ./$(DEPDIR)/openssl.Plo ./$(DEPDIR)/packet.Plo \ + ./$(DEPDIR)/pem.Plo ./$(DEPDIR)/publickey.Plo \ + ./$(DEPDIR)/scp.Plo ./$(DEPDIR)/session.Plo \ + ./$(DEPDIR)/sftp.Plo ./$(DEPDIR)/transport.Plo \ + ./$(DEPDIR)/userauth.Plo ./$(DEPDIR)/version.Plo \ + ./$(DEPDIR)/wincng.Plo am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) @@ -203,8 +217,8 @@ am__can_run_installinfo = \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac -am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ - 
$(LISP)libssh2_config.h.in +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \ + libssh2_config.h.in # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. @@ -221,13 +235,10 @@ am__define_uniq_tagged_files = \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags am__DIST_COMMON = $(srcdir)/../Makefile.OpenSSL.inc \ $(srcdir)/../Makefile.WinCNG.inc $(srcdir)/../Makefile.inc \ $(srcdir)/../Makefile.libgcrypt.inc \ - $(srcdir)/../Makefile.mbedTLS.inc \ - $(srcdir)/../Makefile.os400qc3.inc $(srcdir)/Makefile.in \ + $(srcdir)/../Makefile.mbedTLS.inc $(srcdir)/Makefile.in \ $(srcdir)/libssh2_config.h.in $(top_srcdir)/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ @@ -245,6 +256,12 @@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ @@ -255,13 +272,14 @@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ +ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ -HAVE_LIBMBEDTLS = @HAVE_LIBMBEDTLS@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ HAVE_LIBSSL = @HAVE_LIBSSL@ HAVE_LIBZ = @HAVE_LIBZ@ INSTALL = @INSTALL@ @@ -277,8 +295,8 @@ LIBCRYPT32 = @LIBCRYPT32@ LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ LIBGCRYPT = @LIBGCRYPT@ LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ -LIBMBEDTLS = @LIBMBEDTLS@ -LIBMBEDTLS_PREFIX = @LIBMBEDTLS_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSREQUIRED = @LIBSREQUIRED@ @@ -288,12 +306,13 @@ 
LIBSSL_PREFIX = @LIBSSL_PREFIX@ LIBTOOL = @LIBTOOL@ LIBZ = @LIBZ@ LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = @LIB_FUZZING_ENGINE@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBBCRYPT = @LTLIBBCRYPT@ LTLIBCRYPT32 = @LTLIBCRYPT32@ LTLIBGCRYPT = @LTLIBGCRYPT@ -LTLIBMBEDTLS = @LTLIBMBEDTLS@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ LTLIBOBJS = @LTLIBOBJS@ LTLIBSSL = @LTLIBSSL@ LTLIBZ = @LTLIBZ@ @@ -329,6 +348,7 @@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ @@ -382,22 +402,25 @@ AUTOMAKE_OPTIONS = foreign nostdinc @LIBGCRYPT_TRUE@CRYPTO_CSOURCES = libgcrypt.c @MBEDTLS_TRUE@CRYPTO_CSOURCES = mbedtls.c @OPENSSL_TRUE@CRYPTO_CSOURCES = openssl.c -@OS400QC3_TRUE@CRYPTO_CSOURCES = os400qc3.c @WINCNG_TRUE@CRYPTO_CSOURCES = wincng.c @LIBGCRYPT_TRUE@CRYPTO_HHEADERS = libgcrypt.h @MBEDTLS_TRUE@CRYPTO_HHEADERS = mbedtls.h @OPENSSL_TRUE@CRYPTO_HHEADERS = openssl.h -@OS400QC3_TRUE@CRYPTO_HHEADERS = os400qc3.h @WINCNG_TRUE@CRYPTO_HHEADERS = wincng.h +@LIBGCRYPT_TRUE@CRYPTO_LTLIBS = $(LTLIBGCRYPT) +@MBEDTLS_TRUE@CRYPTO_LTLIBS = $(LTLIBMBEDCRYPTO) +@OPENSSL_TRUE@CRYPTO_LTLIBS = $(LTLIBSSL) +@WINCNG_TRUE@CRYPTO_LTLIBS = $(LTLIBBCRYPT) $(LTLIBCRYPT32) CSOURCES = channel.c comp.c crypt.c hostkey.c kex.c mac.c misc.c \ packet.c publickey.c scp.c session.c sftp.c userauth.c transport.c \ - version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c + version.c knownhost.c agent.c $(CRYPTO_CSOURCES) pem.c keepalive.c global.c \ + blowfish.c bcrypt_pbkdf.c agent_win.c HHEADERS = libssh2_priv.h $(CRYPTO_HHEADERS) transport.h channel.h comp.h \ - mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h + mac.h misc.h packet.h userauth.h session.h sftp.h crypto.h blf.h agent.h -# Get the CRYPTO_CSOURCES and CRYPTO_HHEADERS defines +# Get the CRYPTO_CSOURCES, CRYPTO_HHEADERS and CRYPTO_LTLIBS 
defines # Makefile.inc provides the CSOURCES and HHEADERS defines libssh2_la_SOURCES = $(CSOURCES) $(HHEADERS) @@ -439,14 +462,14 @@ AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/src # libssh2_la_LDFLAGS = $(VERSION) -no-undefined \ -export-symbols-regex '^libssh2_.*' \ - $(LTLIBGCRYPT) $(LTLIBSSL) $(LTLIBZ) + $(CRYPTO_LTLIBS) $(LTLIBZ) all: libssh2_config.h $(MAKE) $(AM_MAKEFLAGS) all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(srcdir)/../Makefile.OpenSSL.inc $(srcdir)/../Makefile.libgcrypt.inc $(srcdir)/../Makefile.WinCNG.inc $(srcdir)/../Makefile.os400qc3.inc $(srcdir)/../Makefile.mbedTLS.inc $(srcdir)/../Makefile.inc $(am__configure_deps) +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(srcdir)/../Makefile.OpenSSL.inc $(srcdir)/../Makefile.libgcrypt.inc $(srcdir)/../Makefile.WinCNG.inc $(srcdir)/../Makefile.mbedTLS.inc $(srcdir)/../Makefile.inc $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ @@ -463,10 +486,10 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; -$(srcdir)/../Makefile.OpenSSL.inc $(srcdir)/../Makefile.libgcrypt.inc $(srcdir)/../Makefile.WinCNG.inc $(srcdir)/../Makefile.os400qc3.inc $(srcdir)/../Makefile.mbedTLS.inc $(srcdir)/../Makefile.inc $(am__empty): +$(srcdir)/../Makefile.OpenSSL.inc $(srcdir)/../Makefile.libgcrypt.inc $(srcdir)/../Makefile.WinCNG.inc $(srcdir)/../Makefile.mbedTLS.inc $(srcdir)/../Makefile.inc $(am__empty): 
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh @@ -536,31 +559,39 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/channel.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/comp.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crypt.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/global.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hostkey.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/keepalive.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/kex.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/knownhost.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgcrypt.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mac.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mbedtls.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/misc.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/openssl.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/os400qc3.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/packet.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pem.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/publickey.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/session.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/transport.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/userauth.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/version.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/wincng.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent_win.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bcrypt_pbkdf.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/blowfish.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/channel.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/comp.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crypt.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/global.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hostkey.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/keepalive.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/kex.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/knownhost.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libgcrypt.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mac.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mbedtls.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/misc.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/openssl.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/packet.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pem.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/publickey.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scp.Plo@am__quote@ # 
am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/session.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sftp.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/transport.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/userauth.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/version.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wincng.Plo@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -640,8 +671,10 @@ cscopelist-am: $(am__tagged_files) distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am -distdir: $(DISTFILES) +distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ @@ -714,7 +747,33 @@ clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-am - -rm -rf ./$(DEPDIR) + -rm -f ./$(DEPDIR)/agent.Plo + -rm -f ./$(DEPDIR)/agent_win.Plo + -rm -f ./$(DEPDIR)/bcrypt_pbkdf.Plo + -rm -f ./$(DEPDIR)/blowfish.Plo + -rm -f ./$(DEPDIR)/channel.Plo + -rm -f ./$(DEPDIR)/comp.Plo + -rm -f ./$(DEPDIR)/crypt.Plo + -rm -f ./$(DEPDIR)/global.Plo + -rm -f ./$(DEPDIR)/hostkey.Plo + -rm -f ./$(DEPDIR)/keepalive.Plo + -rm -f ./$(DEPDIR)/kex.Plo + -rm -f ./$(DEPDIR)/knownhost.Plo + -rm -f ./$(DEPDIR)/libgcrypt.Plo + -rm -f ./$(DEPDIR)/mac.Plo + -rm -f ./$(DEPDIR)/mbedtls.Plo + -rm -f ./$(DEPDIR)/misc.Plo + -rm -f ./$(DEPDIR)/openssl.Plo + -rm -f ./$(DEPDIR)/packet.Plo + -rm -f ./$(DEPDIR)/pem.Plo + -rm -f ./$(DEPDIR)/publickey.Plo + -rm 
-f ./$(DEPDIR)/scp.Plo + -rm -f ./$(DEPDIR)/session.Plo + -rm -f ./$(DEPDIR)/sftp.Plo + -rm -f ./$(DEPDIR)/transport.Plo + -rm -f ./$(DEPDIR)/userauth.Plo + -rm -f ./$(DEPDIR)/version.Plo + -rm -f ./$(DEPDIR)/wincng.Plo -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-hdr distclean-tags @@ -760,7 +819,33 @@ install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) + -rm -f ./$(DEPDIR)/agent.Plo + -rm -f ./$(DEPDIR)/agent_win.Plo + -rm -f ./$(DEPDIR)/bcrypt_pbkdf.Plo + -rm -f ./$(DEPDIR)/blowfish.Plo + -rm -f ./$(DEPDIR)/channel.Plo + -rm -f ./$(DEPDIR)/comp.Plo + -rm -f ./$(DEPDIR)/crypt.Plo + -rm -f ./$(DEPDIR)/global.Plo + -rm -f ./$(DEPDIR)/hostkey.Plo + -rm -f ./$(DEPDIR)/keepalive.Plo + -rm -f ./$(DEPDIR)/kex.Plo + -rm -f ./$(DEPDIR)/knownhost.Plo + -rm -f ./$(DEPDIR)/libgcrypt.Plo + -rm -f ./$(DEPDIR)/mac.Plo + -rm -f ./$(DEPDIR)/mbedtls.Plo + -rm -f ./$(DEPDIR)/misc.Plo + -rm -f ./$(DEPDIR)/openssl.Plo + -rm -f ./$(DEPDIR)/packet.Plo + -rm -f ./$(DEPDIR)/pem.Plo + -rm -f ./$(DEPDIR)/publickey.Plo + -rm -f ./$(DEPDIR)/scp.Plo + -rm -f ./$(DEPDIR)/session.Plo + -rm -f ./$(DEPDIR)/sftp.Plo + -rm -f ./$(DEPDIR)/transport.Plo + -rm -f ./$(DEPDIR)/userauth.Plo + -rm -f ./$(DEPDIR)/version.Plo + -rm -f ./$(DEPDIR)/wincng.Plo -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic @@ -781,9 +866,9 @@ uninstall-am: uninstall-libLTLIBRARIES .MAKE: all install-am install-strip -.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ - clean-libLTLIBRARIES clean-libtool cscopelist-am ctags \ - ctags-am distclean distclean-compile distclean-generic \ +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-generic clean-libLTLIBRARIES clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ distclean-hdr distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ 
install-data install-data-am install-dvi install-dvi-am \ diff --git a/vendor/libssh2/src/agent.c b/vendor/libssh2/src/agent.c index c2ba422b65..7ad4ef471f 100644 --- a/vendor/libssh2/src/agent.c +++ b/vendor/libssh2/src/agent.c @@ -38,6 +38,7 @@ */ #include "libssh2_priv.h" +#include "agent.h" #include "misc.h" #include #ifdef HAVE_SYS_UN_H @@ -50,6 +51,9 @@ #endif #include "userauth.h" #include "session.h" +#ifdef WIN32 +#include +#endif /* Requests from client to agent for protocol 1 key operations */ #define SSH_AGENTC_REQUEST_RSA_IDENTITIES 1 @@ -90,55 +94,9 @@ #define SSH_AGENT_CONSTRAIN_LIFETIME 1 #define SSH_AGENT_CONSTRAIN_CONFIRM 2 -/* non-blocking mode on agent connection is not yet implemented, but - for future use. */ -typedef enum { - agent_NB_state_init = 0, - agent_NB_state_request_created, - agent_NB_state_request_length_sent, - agent_NB_state_request_sent, - agent_NB_state_response_length_received, - agent_NB_state_response_received -} agent_nonblocking_states; - -typedef struct agent_transaction_ctx { - unsigned char *request; - size_t request_len; - unsigned char *response; - size_t response_len; - agent_nonblocking_states state; -} *agent_transaction_ctx_t; - -typedef int (*agent_connect_func)(LIBSSH2_AGENT *agent); -typedef int (*agent_transact_func)(LIBSSH2_AGENT *agent, - agent_transaction_ctx_t transctx); -typedef int (*agent_disconnect_func)(LIBSSH2_AGENT *agent); - -struct agent_publickey { - struct list_node node; - - /* this is the struct we expose externally */ - struct libssh2_agent_publickey external; -}; - -struct agent_ops { - agent_connect_func connect; - agent_transact_func transact; - agent_disconnect_func disconnect; -}; - -struct _LIBSSH2_AGENT -{ - LIBSSH2_SESSION *session; /* the session this "belongs to" */ - - libssh2_socket_t fd; - - struct agent_ops *ops; - - struct agent_transaction_ctx transctx; - struct agent_publickey *identity; - struct list_head head; /* list of public keys */ -}; +/* Signature request methods */ 
+#define SSH_AGENT_RSA_SHA2_256 2 +#define SSH_AGENT_RSA_SHA2_512 4 #ifdef PF_UNIX static int @@ -147,22 +105,25 @@ agent_connect_unix(LIBSSH2_AGENT *agent) const char *path; struct sockaddr_un s_un; - path = getenv("SSH_AUTH_SOCK"); - if (!path) - return _libssh2_error(agent->session, LIBSSH2_ERROR_BAD_USE, - "no auth sock variable"); + path = agent->identity_agent_path; + if(!path) { + path = getenv("SSH_AUTH_SOCK"); + if(!path) + return _libssh2_error(agent->session, LIBSSH2_ERROR_BAD_USE, + "no auth sock variable"); + } agent->fd = socket(PF_UNIX, SOCK_STREAM, 0); - if (agent->fd < 0) + if(agent->fd < 0) return _libssh2_error(agent->session, LIBSSH2_ERROR_BAD_SOCKET, "failed creating socket"); s_un.sun_family = AF_UNIX; - strncpy (s_un.sun_path, path, sizeof s_un.sun_path); - s_un.sun_path[sizeof(s_un.sun_path)-1]=0; /* make sure there's a trailing - zero */ - if (connect(agent->fd, (struct sockaddr*)(&s_un), sizeof s_un) != 0) { - close (agent->fd); + strncpy(s_un.sun_path, path, sizeof s_un.sun_path); + s_un.sun_path[sizeof(s_un.sun_path)-1] = 0; /* make sure there's a trailing + zero */ + if(connect(agent->fd, (struct sockaddr*)(&s_un), sizeof s_un) != 0) { + close(agent->fd); return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, "failed connecting with agent"); } @@ -170,6 +131,38 @@ agent_connect_unix(LIBSSH2_AGENT *agent) return LIBSSH2_ERROR_NONE; } +#define RECV_SEND_ALL(func, socket, buffer, length, flags, abstract) \ + int rc; \ + size_t finished = 0; \ + \ + while(finished < length) { \ + rc = func(socket, \ + (char *)buffer + finished, length - finished, \ + flags, abstract); \ + if(rc < 0) \ + return rc; \ + \ + finished += rc; \ + } \ + \ + return finished; + +static ssize_t _send_all(LIBSSH2_SEND_FUNC(func), libssh2_socket_t socket, + const void *buffer, size_t length, + int flags, void **abstract) +{ + RECV_SEND_ALL(func, socket, buffer, length, flags, abstract); +} + +static ssize_t _recv_all(LIBSSH2_RECV_FUNC(func), 
libssh2_socket_t socket, + void *buffer, size_t length, + int flags, void **abstract) +{ + RECV_SEND_ALL(func, socket, buffer, length, flags, abstract); +} + +#undef RECV_SEND_ALL + static int agent_transact_unix(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) { @@ -177,34 +170,36 @@ agent_transact_unix(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) int rc; /* Send the length of the request */ - if (transctx->state == agent_NB_state_request_created) { + if(transctx->state == agent_NB_state_request_created) { _libssh2_htonu32(buf, transctx->request_len); - rc = LIBSSH2_SEND_FD(agent->session, agent->fd, buf, sizeof buf, 0); - if (rc == -EAGAIN) + rc = _send_all(agent->session->send, agent->fd, + buf, sizeof buf, 0, &agent->session->abstract); + if(rc == -EAGAIN) return LIBSSH2_ERROR_EAGAIN; - else if (rc < 0) + else if(rc < 0) return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_SEND, "agent send failed"); transctx->state = agent_NB_state_request_length_sent; } /* Send the request body */ - if (transctx->state == agent_NB_state_request_length_sent) { - rc = LIBSSH2_SEND_FD(agent->session, agent->fd, transctx->request, - transctx->request_len, 0); - if (rc == -EAGAIN) + if(transctx->state == agent_NB_state_request_length_sent) { + rc = _send_all(agent->session->send, agent->fd, transctx->request, + transctx->request_len, 0, &agent->session->abstract); + if(rc == -EAGAIN) return LIBSSH2_ERROR_EAGAIN; - else if (rc < 0) + else if(rc < 0) return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_SEND, "agent send failed"); transctx->state = agent_NB_state_request_sent; } /* Receive the length of a response */ - if (transctx->state == agent_NB_state_request_sent) { - rc = LIBSSH2_RECV_FD(agent->session, agent->fd, buf, sizeof buf, 0); - if (rc < 0) { - if (rc == -EAGAIN) + if(transctx->state == agent_NB_state_request_sent) { + rc = _recv_all(agent->session->recv, agent->fd, + buf, sizeof buf, 0, &agent->session->abstract); + if(rc < 0) { + if(rc 
== -EAGAIN) return LIBSSH2_ERROR_EAGAIN; return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_RECV, "agent recv failed"); @@ -212,18 +207,18 @@ agent_transact_unix(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) transctx->response_len = _libssh2_ntohu32(buf); transctx->response = LIBSSH2_ALLOC(agent->session, transctx->response_len); - if (!transctx->response) + if(!transctx->response) return LIBSSH2_ERROR_ALLOC; transctx->state = agent_NB_state_response_length_received; } /* Receive the response body */ - if (transctx->state == agent_NB_state_response_length_received) { - rc = LIBSSH2_RECV_FD(agent->session, agent->fd, transctx->response, - transctx->response_len, 0); - if (rc < 0) { - if (rc == -EAGAIN) + if(transctx->state == agent_NB_state_response_length_received) { + rc = _recv_all(agent->session->recv, agent->fd, transctx->response, + transctx->response_len, 0, &agent->session->abstract); + if(rc < 0) { + if(rc == -EAGAIN) return LIBSSH2_ERROR_EAGAIN; return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_SEND, "agent recv failed"); @@ -269,8 +264,8 @@ static int agent_connect_pageant(LIBSSH2_AGENT *agent) { HWND hwnd; - hwnd = FindWindow("Pageant", "Pageant"); - if (!hwnd) + hwnd = FindWindowA("Pageant", "Pageant"); + if(!hwnd) return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, "failed connecting agent"); agent->fd = 0; /* Mark as the connection has been established */ @@ -288,25 +283,26 @@ agent_transact_pageant(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) int id; COPYDATASTRUCT cds; - if (!transctx || 4 + transctx->request_len > PAGEANT_MAX_MSGLEN) + if(!transctx || 4 + transctx->request_len > PAGEANT_MAX_MSGLEN) return _libssh2_error(agent->session, LIBSSH2_ERROR_INVAL, "illegal input"); - hwnd = FindWindow("Pageant", "Pageant"); - if (!hwnd) + hwnd = FindWindowA("Pageant", "Pageant"); + if(!hwnd) return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, "found no pageant"); - sprintf(mapname, 
"PageantRequest%08x", (unsigned)GetCurrentThreadId()); - filemap = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, - 0, PAGEANT_MAX_MSGLEN, mapname); + snprintf(mapname, sizeof(mapname), + "PageantRequest%08x%c", (unsigned)GetCurrentThreadId(), '\0'); + filemap = CreateFileMappingA(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, + 0, PAGEANT_MAX_MSGLEN, mapname); - if (filemap == NULL || filemap == INVALID_HANDLE_VALUE) + if(filemap == NULL || filemap == INVALID_HANDLE_VALUE) return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, "failed setting up pageant filemap"); p2 = p = MapViewOfFile(filemap, FILE_MAP_WRITE, 0, 0, 0); - if (p == NULL || p2 == NULL) { + if(p == NULL || p2 == NULL) { CloseHandle(filemap); return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, "failed to open pageant filemap for writing"); @@ -320,9 +316,9 @@ agent_transact_pageant(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) cds.lpData = mapname; id = SendMessage(hwnd, WM_COPYDATA, (WPARAM) NULL, (LPARAM) &cds); - if (id > 0) { + if(id > 0) { transctx->response_len = _libssh2_ntohu32(p); - if (transctx->response_len > PAGEANT_MAX_MSGLEN) { + if(transctx->response_len > PAGEANT_MAX_MSGLEN) { UnmapViewOfFile(p); CloseHandle(filemap); return _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, @@ -330,7 +326,7 @@ agent_transact_pageant(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) } transctx->response = LIBSSH2_ALLOC(agent->session, transctx->response_len); - if (!transctx->response) { + if(!transctx->response) { UnmapViewOfFile(p); CloseHandle(filemap); return _libssh2_error(agent->session, LIBSSH2_ERROR_ALLOC, @@ -364,6 +360,7 @@ static struct { } supported_backends[] = { #ifdef WIN32 {"Pageant", &agent_ops_pageant}, + {"OpenSSH", &agent_ops_openssh}, #endif /* WIN32 */ #ifdef PF_UNIX {"Unix", &agent_ops_unix}, @@ -382,11 +379,13 @@ agent_sign(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, ssize_t method_len; 
unsigned char *s; int rc; + unsigned char *method_name; + uint32_t sign_flags = 0; /* Create a request to sign the data */ - if (transctx->state == agent_NB_state_init) { + if(transctx->state == agent_NB_state_init) { s = transctx->request = LIBSSH2_ALLOC(session, len); - if (!transctx->request) + if(!transctx->request) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "out of memory"); @@ -398,24 +397,36 @@ agent_sign(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, _libssh2_store_str(&s, (const char *)data, data_len); /* flags */ - _libssh2_store_u32(&s, 0); + if(session->userauth_pblc_method_len > 0 && + session->userauth_pblc_method) { + if(session->userauth_pblc_method_len == 12 && + !memcmp(session->userauth_pblc_method, "rsa-sha2-512", 12)) { + sign_flags = SSH_AGENT_RSA_SHA2_512; + } + else if(session->userauth_pblc_method_len == 12 && + !memcmp(session->userauth_pblc_method, "rsa-sha2-256", 12)) { + sign_flags = SSH_AGENT_RSA_SHA2_256; + } + } + _libssh2_store_u32(&s, sign_flags); transctx->request_len = s - transctx->request; + transctx->send_recv_total = 0; transctx->state = agent_NB_state_request_created; } /* Make sure to be re-called as a result of EAGAIN. 
*/ - if (*transctx->request != SSH2_AGENTC_SIGN_REQUEST) + if(*transctx->request != SSH2_AGENTC_SIGN_REQUEST) return _libssh2_error(session, LIBSSH2_ERROR_BAD_USE, "illegal request"); - if (!agent->ops) + if(!agent->ops) /* if no agent has been connected, bail out */ return _libssh2_error(session, LIBSSH2_ERROR_BAD_USE, "agent not connected"); rc = agent->ops->transact(agent, transctx); - if (rc) { + if(rc) { goto error; } LIBSSH2_FREE(session, transctx->request); @@ -424,11 +435,11 @@ agent_sign(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, len = transctx->response_len; s = transctx->response; len--; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } - if (*s != SSH2_AGENT_SIGN_RESPONSE) { + if(*s != SSH2_AGENT_SIGN_RESPONSE) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } @@ -436,7 +447,7 @@ agent_sign(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, /* Skip the entire length of the signature */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } @@ -444,47 +455,83 @@ agent_sign(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, /* Skip signing method */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } method_len = _libssh2_ntohu32(s); s += 4; len -= method_len; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } + + /* method name */ + method_name = LIBSSH2_ALLOC(session, method_len); + if(!method_name) { + rc = LIBSSH2_ERROR_ALLOC; + goto error; + } + memcpy(method_name, s, method_len); s += method_len; + /* check to see if we match requested */ + if((size_t)method_len == session->userauth_pblc_method_len) { + if(memcmp(method_name, session->userauth_pblc_method, method_len)) { + _libssh2_debug(session, + LIBSSH2_TRACE_KEX, + "Agent sign method %.*s", + method_len, method_name); + + rc = LIBSSH2_ERROR_ALGO_UNSUPPORTED; + goto error; + } + } + else { + 
_libssh2_debug(session, + LIBSSH2_TRACE_KEX, + "Agent sign method %.*s", + method_len, method_name); + + rc = LIBSSH2_ERROR_ALGO_UNSUPPORTED; + goto error; + } + /* Read the signature */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } *sig_len = _libssh2_ntohu32(s); s += 4; len -= *sig_len; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } *sig = LIBSSH2_ALLOC(session, *sig_len); - if (!*sig) { + if(!*sig) { rc = LIBSSH2_ERROR_ALLOC; goto error; } memcpy(*sig, s, *sig_len); error: + + if(method_name) + LIBSSH2_FREE(session, method_name); + LIBSSH2_FREE(session, transctx->request); transctx->request = NULL; LIBSSH2_FREE(session, transctx->response); transctx->response = NULL; + transctx->state = agent_NB_state_init; + return _libssh2_error(session, rc, "agent sign failure"); } @@ -498,36 +545,39 @@ agent_list_identities(LIBSSH2_AGENT *agent) unsigned char c = SSH2_AGENTC_REQUEST_IDENTITIES; /* Create a request to list identities */ - if (transctx->state == agent_NB_state_init) { + if(transctx->state == agent_NB_state_init) { transctx->request = &c; transctx->request_len = 1; + transctx->send_recv_total = 0; transctx->state = agent_NB_state_request_created; } /* Make sure to be re-called as a result of EAGAIN. 
*/ - if (*transctx->request != SSH2_AGENTC_REQUEST_IDENTITIES) + if(*transctx->request != SSH2_AGENTC_REQUEST_IDENTITIES) return _libssh2_error(agent->session, LIBSSH2_ERROR_BAD_USE, "illegal agent request"); - if (!agent->ops) + if(!agent->ops) /* if no agent has been connected, bail out */ return _libssh2_error(agent->session, LIBSSH2_ERROR_BAD_USE, "agent not connected"); rc = agent->ops->transact(agent, transctx); - if (rc) { - goto error; + if(rc) { + LIBSSH2_FREE(agent->session, transctx->response); + transctx->response = NULL; + return rc; } transctx->request = NULL; len = transctx->response_len; s = transctx->response; len--; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } - if (*s != SSH2_AGENT_IDENTITIES_ANSWER) { + if(*s != SSH2_AGENT_IDENTITIES_ANSWER) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } @@ -535,25 +585,25 @@ agent_list_identities(LIBSSH2_AGENT *agent) /* Read the length of identities */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } num_identities = _libssh2_ntohu32(s); s += 4; - while (num_identities--) { + while(num_identities--) { struct agent_publickey *identity; ssize_t comment_len; /* Read the length of the blob */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; goto error; } identity = LIBSSH2_ALLOC(agent->session, sizeof *identity); - if (!identity) { + if(!identity) { rc = LIBSSH2_ERROR_ALLOC; goto error; } @@ -562,7 +612,7 @@ agent_list_identities(LIBSSH2_AGENT *agent) /* Read the blob */ len -= identity->external.blob_len; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; LIBSSH2_FREE(agent->session, identity); goto error; @@ -570,7 +620,7 @@ agent_list_identities(LIBSSH2_AGENT *agent) identity->external.blob = LIBSSH2_ALLOC(agent->session, identity->external.blob_len); - if (!identity->external.blob) { + if(!identity->external.blob) { rc = LIBSSH2_ERROR_ALLOC; LIBSSH2_FREE(agent->session, identity); 
goto error; @@ -580,7 +630,7 @@ agent_list_identities(LIBSSH2_AGENT *agent) /* Read the length of the comment */ len -= 4; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; LIBSSH2_FREE(agent->session, identity->external.blob); LIBSSH2_FREE(agent->session, identity); @@ -591,7 +641,7 @@ agent_list_identities(LIBSSH2_AGENT *agent) /* Read the comment */ len -= comment_len; - if (len < 0) { + if(len < 0) { rc = LIBSSH2_ERROR_AGENT_PROTOCOL; LIBSSH2_FREE(agent->session, identity->external.blob); LIBSSH2_FREE(agent->session, identity); @@ -600,7 +650,7 @@ agent_list_identities(LIBSSH2_AGENT *agent) identity->external.comment = LIBSSH2_ALLOC(agent->session, comment_len + 1); - if (!identity->external.comment) { + if(!identity->external.comment) { rc = LIBSSH2_ERROR_ALLOC; LIBSSH2_FREE(agent->session, identity->external.blob); LIBSSH2_FREE(agent->session, identity); @@ -621,11 +671,12 @@ agent_list_identities(LIBSSH2_AGENT *agent) } static void -agent_free_identities(LIBSSH2_AGENT *agent) { +agent_free_identities(LIBSSH2_AGENT *agent) +{ struct agent_publickey *node; struct agent_publickey *next; - for (node = _libssh2_list_first(&agent->head); node; node = next) { + for(node = _libssh2_list_first(&agent->head); node; node = next) { next = _libssh2_list_next(&node->node); LIBSSH2_FREE(agent->session, node->external.blob); LIBSSH2_FREE(agent->session, node->external.comment); @@ -664,15 +715,22 @@ libssh2_agent_init(LIBSSH2_SESSION *session) LIBSSH2_AGENT *agent; agent = LIBSSH2_CALLOC(session, sizeof *agent); - if (!agent) { + if(!agent) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate space for agent connection"); return NULL; } agent->fd = LIBSSH2_INVALID_SOCKET; agent->session = session; + agent->identity_agent_path = NULL; _libssh2_list_init(&agent->head); +#ifdef WIN32 + agent->pipe = INVALID_HANDLE_VALUE; + memset(&agent->overlapped, 0, sizeof(OVERLAPPED)); + agent->pending_io = FALSE; +#endif + return agent; } @@ -687,10 +745,10 
@@ LIBSSH2_API int libssh2_agent_connect(LIBSSH2_AGENT *agent) { int i, rc = -1; - for (i = 0; supported_backends[i].name; i++) { + for(i = 0; supported_backends[i].name; i++) { agent->ops = supported_backends[i].ops; rc = (agent->ops->connect)(agent); - if (!rc) + if(!rc) return 0; } return rc; @@ -707,7 +765,7 @@ LIBSSH2_API int libssh2_agent_list_identities(LIBSSH2_AGENT *agent) { memset(&agent->transctx, 0, sizeof agent->transctx); - /* Abondon the last fetched identities */ + /* Abandon the last fetched identities */ agent_free_identities(agent); return agent_list_identities(agent); } @@ -730,7 +788,7 @@ libssh2_agent_get_identity(LIBSSH2_AGENT *agent, struct libssh2_agent_publickey *oprev) { struct agent_publickey *node; - if (oprev && oprev->node) { + if(oprev && oprev->node) { /* we have a starting point */ struct agent_publickey *prev = oprev->node; @@ -740,7 +798,7 @@ libssh2_agent_get_identity(LIBSSH2_AGENT *agent, else node = _libssh2_list_first(&agent->head); - if (!node) + if(!node) /* no (more) node */ return 1; @@ -764,7 +822,7 @@ libssh2_agent_userauth(LIBSSH2_AGENT *agent, void *abstract = agent; int rc; - if (agent->session->userauth_pblc_state == libssh2_NB_state_idle) { + if(agent->session->userauth_pblc_state == libssh2_NB_state_idle) { memset(&agent->transctx, 0, sizeof agent->transctx); agent->identity = identity->node; } @@ -789,7 +847,7 @@ libssh2_agent_userauth(LIBSSH2_AGENT *agent, LIBSSH2_API int libssh2_agent_disconnect(LIBSSH2_AGENT *agent) { - if (agent->ops && agent->fd != LIBSSH2_INVALID_SOCKET) + if(agent->ops && agent->fd != LIBSSH2_INVALID_SOCKET) return agent->ops->disconnect(agent); return 0; } @@ -801,11 +859,52 @@ libssh2_agent_disconnect(LIBSSH2_AGENT *agent) * collection of public keys. 
*/ LIBSSH2_API void -libssh2_agent_free(LIBSSH2_AGENT *agent) { +libssh2_agent_free(LIBSSH2_AGENT *agent) +{ /* Allow connection freeing when the socket has lost its connection */ - if (agent->fd != LIBSSH2_INVALID_SOCKET) { + if(agent->fd != LIBSSH2_INVALID_SOCKET) { libssh2_agent_disconnect(agent); } + + if(agent->identity_agent_path != NULL) + LIBSSH2_FREE(agent->session, agent->identity_agent_path); + agent_free_identities(agent); LIBSSH2_FREE(agent->session, agent); } + +/* + * libssh2_agent_set_identity_path() + * + * Allows a custom agent socket path beyond SSH_AUTH_SOCK env + * + */ +LIBSSH2_API void +libssh2_agent_set_identity_path(LIBSSH2_AGENT *agent, const char *path) +{ + if(agent->identity_agent_path) { + LIBSSH2_FREE(agent->session, agent->identity_agent_path); + agent->identity_agent_path = NULL; + } + + if(path) { + size_t path_len = strlen(path); + if(path_len < SIZE_MAX - 1) { + char *path_buf = LIBSSH2_ALLOC(agent->session, path_len + 1); + memcpy(path_buf, path, path_len); + path_buf[path_len] = '\0'; + agent->identity_agent_path = path_buf; + } + } +} + +/* + * libssh2_agent_get_identity_path() + * + * Returns the custom agent socket path if set + * + */ +LIBSSH2_API const char *libssh2_agent_get_identity_path(LIBSSH2_AGENT *agent) +{ + return agent->identity_agent_path; +} diff --git a/vendor/libssh2/src/agent.h b/vendor/libssh2/src/agent.h new file mode 100644 index 0000000000..dfac0715c8 --- /dev/null +++ b/vendor/libssh2/src/agent.h @@ -0,0 +1,112 @@ +#ifndef __LIBSSH2_AGENT_H +#define __LIBSSH2_AGENT_H +/* + * Copyright (c) 2009 by Daiki Ueno + * Copyright (C) 2010-2014 by Daniel Stenberg + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * Neither the name of the copyright holder nor the names + * of any other contributors may be used to endorse or + * promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +#include "libssh2_priv.h" +#include "misc.h" +#include "session.h" +#ifdef WIN32 +#include +#endif + +/* non-blocking mode on agent connection is not yet implemented, but + for future use. 
*/ +typedef enum { + agent_NB_state_init = 0, + agent_NB_state_request_created, + agent_NB_state_request_length_sent, + agent_NB_state_request_sent, + agent_NB_state_response_length_received, + agent_NB_state_response_received +} agent_nonblocking_states; + +typedef struct agent_transaction_ctx { + unsigned char *request; + size_t request_len; + unsigned char *response; + size_t response_len; + agent_nonblocking_states state; + size_t send_recv_total; +} *agent_transaction_ctx_t; + +typedef int (*agent_connect_func)(LIBSSH2_AGENT *agent); +typedef int (*agent_transact_func)(LIBSSH2_AGENT *agent, + agent_transaction_ctx_t transctx); +typedef int (*agent_disconnect_func)(LIBSSH2_AGENT *agent); + +struct agent_publickey { + struct list_node node; + + /* this is the struct we expose externally */ + struct libssh2_agent_publickey external; +}; + +struct agent_ops { + agent_connect_func connect; + agent_transact_func transact; + agent_disconnect_func disconnect; +}; + +struct _LIBSSH2_AGENT +{ + LIBSSH2_SESSION *session; /* the session this "belongs to" */ + + libssh2_socket_t fd; + + struct agent_ops *ops; + + struct agent_transaction_ctx transctx; + struct agent_publickey *identity; + struct list_head head; /* list of public keys */ + + char *identity_agent_path; /* Path to a custom identity agent socket */ + +#ifdef WIN32 + OVERLAPPED overlapped; + HANDLE pipe; + BOOL pending_io; +#endif +}; + +#ifdef WIN32 +extern struct agent_ops agent_ops_openssh; +#endif + +#endif /* __LIBSSH2_AGENT_H */ diff --git a/vendor/libssh2/src/agent_win.c b/vendor/libssh2/src/agent_win.c new file mode 100644 index 0000000000..a1605a95fc --- /dev/null +++ b/vendor/libssh2/src/agent_win.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2009 by Daiki Ueno + * Copyright (C) 2010-2014 by Daniel Stenberg + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * Neither the name of the copyright holder nor the names + * of any other contributors may be used to endorse or + * promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +#include "libssh2_priv.h" +#include "agent.h" +#include "misc.h" +#include +#ifdef HAVE_SYS_UN_H +#include +#else +/* Use the existence of sys/un.h as a test if Unix domain socket is + supported. winsock*.h define PF_UNIX/AF_UNIX but do not actually + support them. 
*/ +#undef PF_UNIX +#endif +#include "userauth.h" +#include "session.h" +#ifdef WIN32 +#include +#endif + +#ifdef WIN32 +/* Code to talk to OpenSSH was taken and modified from the Win32 port of + * Portable OpenSSH by the PowerShell team. Commit + * 8ab565c53f3619d6a1f5ac229e212cad8a52852c of + * https://github.com/PowerShell/openssh-portable.git was used as the base, + * specificaly the following files: + * + * - contrib\win32\win32compat\fileio.c + * - Structure of agent_connect_openssh from ssh_get_authentication_socket + * - Structure of agent_transact_openssh from ssh_request_reply + * - contrib\win32\win32compat\wmain_common.c + * - Windows equivalent functions for common Unix functions, inlined into + * this implementation + * - fileio_connect replacing connect + * - fileio_read replacing read + * - fileio_write replacing write + * - fileio_close replacing close + * + * Author: Tatu Ylonen + * Copyright (c) 1995 Tatu Ylonen , Espoo, Finland + * All rights reserved + * Functions for connecting the local authentication agent. + * + * As far as I am concerned, the code I have written for this software + * can be used freely for any purpose. Any derived versions of this + * software must be clearly marked as such, and if the derived work is + * incompatible with the protocol description in the RFC file, it must be + * called by a name other than "ssh" or "Secure Shell". + * + * SSH2 implementation, + * Copyright (c) 2000 Markus Friedl. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Copyright (c) 2015 Microsoft Corp. + * All rights reserved + * + * Microsoft openssh win32 port + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define WIN32_OPENSSH_AGENT_SOCK "\\\\.\\pipe\\openssh-ssh-agent" + +static int +agent_connect_openssh(LIBSSH2_AGENT *agent) +{ + int ret = LIBSSH2_ERROR_NONE; + const char *path; + HANDLE pipe = INVALID_HANDLE_VALUE; + HANDLE event = NULL; + + path = agent->identity_agent_path; + if(!path) { + path = getenv("SSH_AUTH_SOCK"); + if(!path) + path = WIN32_OPENSSH_AGENT_SOCK; + } + + for(;;) { + pipe = CreateFileA( + path, + GENERIC_READ | GENERIC_WRITE, + 0, + NULL, + OPEN_EXISTING, + /* Non-blocking mode for agent connections is not implemented at + * the point this was implemented. The code for Win32 OpenSSH + * should support non-blocking IO, but the code calling it doesn't + * support it as of yet. + * When non-blocking IO is implemented for the surrounding code, + * uncomment the following line to enable support within the Win32 + * OpenSSH code. 
+ */ + /* FILE_FLAG_OVERLAPPED | */ + SECURITY_SQOS_PRESENT | + SECURITY_IDENTIFICATION, + NULL + ); + + if(pipe != INVALID_HANDLE_VALUE) + break; + if(GetLastError() != ERROR_PIPE_BUSY) + break; + + /* Wait up to 1 second for a pipe instance to become available */ + if(!WaitNamedPipeA(path, 1000)) + break; + } + + if(pipe == INVALID_HANDLE_VALUE) { + ret = _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, + "unable to connect to agent pipe"); + goto cleanup; + } + + if(SetHandleInformation(pipe, HANDLE_FLAG_INHERIT, 0) == FALSE) { + ret = _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, + "unable to set handle information of agent pipe"); + goto cleanup; + } + + event = CreateEventA(NULL, TRUE, FALSE, NULL); + if(event == NULL) { + ret = _libssh2_error(agent->session, LIBSSH2_ERROR_AGENT_PROTOCOL, + "unable to create async I/O event"); + goto cleanup; + } + + agent->pipe = pipe; + pipe = INVALID_HANDLE_VALUE; + agent->overlapped.hEvent = event; + event = NULL; + agent->fd = 0; /* Mark as the connection has been established */ + +cleanup: + if(event != NULL) + CloseHandle(event); + if(pipe != INVALID_HANDLE_VALUE) + CloseHandle(pipe); + return ret; +} + +#define RECV_SEND_ALL(func, agent, buffer, length, total) \ + DWORD bytes_transfered; \ + BOOL ret; \ + DWORD err; \ + int rc; \ + \ + while(*total < length) { \ + if(!agent->pending_io) \ + ret = func(agent->pipe, (char *)buffer + *total, \ + (DWORD)(length - *total), &bytes_transfered, \ + &agent->overlapped); \ + else \ + ret = GetOverlappedResult(agent->pipe, &agent->overlapped, \ + &bytes_transfered, FALSE); \ + \ + *total += bytes_transfered; \ + if(!ret) { \ + err = GetLastError(); \ + if((!agent->pending_io && ERROR_IO_PENDING == err) \ + || (agent->pending_io && ERROR_IO_INCOMPLETE == err)) { \ + agent->pending_io = TRUE; \ + return LIBSSH2_ERROR_EAGAIN; \ + } \ + \ + return LIBSSH2_ERROR_SOCKET_NONE; \ + } \ + agent->pending_io = FALSE; \ + } \ + \ + rc = (int)*total; \ + *total = 
0; \ + return rc; + +static int +win32_openssh_send_all(LIBSSH2_AGENT *agent, void *buffer, size_t length, + size_t *send_recv_total) +{ + RECV_SEND_ALL(WriteFile, agent, buffer, length, send_recv_total) +} + +static int +win32_openssh_recv_all(LIBSSH2_AGENT *agent, void *buffer, size_t length, + size_t *send_recv_total) +{ + RECV_SEND_ALL(ReadFile, agent, buffer, length, send_recv_total) +} + +#undef RECV_SEND_ALL + +static int +agent_transact_openssh(LIBSSH2_AGENT *agent, agent_transaction_ctx_t transctx) +{ + unsigned char buf[4]; + int rc; + + /* Send the length of the request */ + if(transctx->state == agent_NB_state_request_created) { + _libssh2_htonu32(buf, (uint32_t)transctx->request_len); + rc = win32_openssh_send_all(agent, buf, sizeof buf, + &transctx->send_recv_total); + if(rc == LIBSSH2_ERROR_EAGAIN) + return LIBSSH2_ERROR_EAGAIN; + else if(rc < 0) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_SEND, + "agent send failed"); + transctx->state = agent_NB_state_request_length_sent; + } + + /* Send the request body */ + if(transctx->state == agent_NB_state_request_length_sent) { + rc = win32_openssh_send_all(agent, transctx->request, + transctx->request_len, + &transctx->send_recv_total); + if(rc == LIBSSH2_ERROR_EAGAIN) + return LIBSSH2_ERROR_EAGAIN; + else if(rc < 0) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_SEND, + "agent send failed"); + transctx->state = agent_NB_state_request_sent; + } + + /* Receive the length of the body */ + if(transctx->state == agent_NB_state_request_sent) { + rc = win32_openssh_recv_all(agent, buf, sizeof buf, + &transctx->send_recv_total); + if(rc == LIBSSH2_ERROR_EAGAIN) + return LIBSSH2_ERROR_EAGAIN; + else if(rc < 0) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_RECV, + "agent recv failed"); + + transctx->response_len = _libssh2_ntohu32(buf); + transctx->response = LIBSSH2_ALLOC(agent->session, + transctx->response_len); + if(!transctx->response) + return LIBSSH2_ERROR_ALLOC; 
+ + transctx->state = agent_NB_state_response_length_received; + } + + /* Receive the response body */ + if(transctx->state == agent_NB_state_response_length_received) { + rc = win32_openssh_recv_all(agent, transctx->response, + transctx->response_len, + &transctx->send_recv_total); + if(rc == LIBSSH2_ERROR_EAGAIN) + return LIBSSH2_ERROR_EAGAIN; + else if(rc < 0) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_RECV, + "agent recv failed"); + transctx->state = agent_NB_state_response_received; + } + + return LIBSSH2_ERROR_NONE; +} + +static int +agent_disconnect_openssh(LIBSSH2_AGENT *agent) +{ + if(!CancelIo(agent->pipe)) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_DISCONNECT, + "failed to cancel pending IO of agent pipe"); + if(!CloseHandle(agent->overlapped.hEvent)) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_DISCONNECT, + "failed to close handle to async I/O event"); + agent->overlapped.hEvent = NULL; + /* let queued APCs (if any) drain */ + SleepEx(0, TRUE); + if(!CloseHandle(agent->pipe)) + return _libssh2_error(agent->session, LIBSSH2_ERROR_SOCKET_DISCONNECT, + "failed to close handle to agent pipe"); + + agent->pipe = INVALID_HANDLE_VALUE; + agent->fd = LIBSSH2_INVALID_SOCKET; + + return LIBSSH2_ERROR_NONE; +} + +struct agent_ops agent_ops_openssh = { + agent_connect_openssh, + agent_transact_openssh, + agent_disconnect_openssh +}; +#endif /* WIN32 */ diff --git a/vendor/libssh2/src/bcrypt_pbkdf.c b/vendor/libssh2/src/bcrypt_pbkdf.c new file mode 100644 index 0000000000..f782bcac5f --- /dev/null +++ b/vendor/libssh2/src/bcrypt_pbkdf.c @@ -0,0 +1,180 @@ +/* $OpenBSD: bcrypt_pbkdf.c,v 1.4 2013/07/29 00:55:53 tedu Exp $ */ +/* + * Copyright (c) 2013 Ted Unangst + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef HAVE_BCRYPT_PBKDF + +#include "libssh2_priv.h" +#include +#include +#ifdef HAVE_SYS_PARAM_H +#include +#endif + +#include "blf.h" + +#define MINIMUM(a,b) (((a) < (b)) ? (a) : (b)) + +/* + * pkcs #5 pbkdf2 implementation using the "bcrypt" hash + * + * The bcrypt hash function is derived from the bcrypt password hashing + * function with the following modifications: + * 1. The input password and salt are preprocessed with SHA512. + * 2. The output length is expanded to 256 bits. + * 3. Subsequently the magic string to be encrypted is lengthened and modified + * to "OxychromaticBlowfishSwatDynamite" + * 4. The hash function is defined to perform 64 rounds of initial state + * expansion. (More rounds are performed by iterating the hash.) + * + * Note that this implementation pulls the SHA512 operations into the caller + * as a performance optimization. + * + * One modification from official pbkdf2. Instead of outputting key material + * linearly, we mix it. pbkdf2 has a known weakness where if one uses it to + * generate (i.e.) 512 bits of key material for use as two 256 bit keys, an + * attacker can merely run once through the outer loop below, but the user + * always runs it twice. Shuffling output bytes requires computing the + * entirety of the key material to assemble any subkey. This is something a + * wise caller could do; we just do it for you. 
+ */ + +#define BCRYPT_BLOCKS 8 +#define BCRYPT_HASHSIZE (BCRYPT_BLOCKS * 4) + +static void +bcrypt_hash(uint8_t *sha2pass, uint8_t *sha2salt, uint8_t *out) +{ + blf_ctx state; + uint8_t ciphertext[BCRYPT_HASHSIZE] = + "OxychromaticBlowfishSwatDynamite"; + uint32_t cdata[BCRYPT_BLOCKS]; + int i; + uint16_t j; + size_t shalen = SHA512_DIGEST_LENGTH; + + /* key expansion */ + Blowfish_initstate(&state); + Blowfish_expandstate(&state, sha2salt, shalen, sha2pass, shalen); + for(i = 0; i < 64; i++) { + Blowfish_expand0state(&state, sha2salt, shalen); + Blowfish_expand0state(&state, sha2pass, shalen); + } + + /* encryption */ + j = 0; + for(i = 0; i < BCRYPT_BLOCKS; i++) + cdata[i] = Blowfish_stream2word(ciphertext, sizeof(ciphertext), + &j); + for(i = 0; i < 64; i++) + blf_enc(&state, cdata, BCRYPT_BLOCKS / 2); + + /* copy out */ + for(i = 0; i < BCRYPT_BLOCKS; i++) { + out[4 * i + 3] = (cdata[i] >> 24) & 0xff; + out[4 * i + 2] = (cdata[i] >> 16) & 0xff; + out[4 * i + 1] = (cdata[i] >> 8) & 0xff; + out[4 * i + 0] = cdata[i] & 0xff; + } + + /* zap */ + _libssh2_explicit_zero(ciphertext, sizeof(ciphertext)); + _libssh2_explicit_zero(cdata, sizeof(cdata)); + _libssh2_explicit_zero(&state, sizeof(state)); +} + +int +bcrypt_pbkdf(const char *pass, size_t passlen, const uint8_t *salt, + size_t saltlen, + uint8_t *key, size_t keylen, unsigned int rounds) +{ + uint8_t sha2pass[SHA512_DIGEST_LENGTH]; + uint8_t sha2salt[SHA512_DIGEST_LENGTH]; + uint8_t out[BCRYPT_HASHSIZE]; + uint8_t tmpout[BCRYPT_HASHSIZE]; + uint8_t *countsalt; + size_t i, j, amt, stride; + uint32_t count; + size_t origkeylen = keylen; + libssh2_sha512_ctx ctx; + + /* nothing crazy */ + if(rounds < 1) + return -1; + if(passlen == 0 || saltlen == 0 || keylen == 0 || + keylen > sizeof(out) * sizeof(out) || saltlen > 1<<20) + return -1; + countsalt = calloc(1, saltlen + 4); + if(countsalt == NULL) + return -1; + stride = (keylen + sizeof(out) - 1) / sizeof(out); + amt = (keylen + stride - 1) / stride; + + 
memcpy(countsalt, salt, saltlen); + + /* collapse password */ + libssh2_sha512_init(&ctx); + libssh2_sha512_update(ctx, pass, passlen); + libssh2_sha512_final(ctx, sha2pass); + + /* generate key, sizeof(out) at a time */ + for(count = 1; keylen > 0; count++) { + countsalt[saltlen + 0] = (count >> 24) & 0xff; + countsalt[saltlen + 1] = (count >> 16) & 0xff; + countsalt[saltlen + 2] = (count >> 8) & 0xff; + countsalt[saltlen + 3] = count & 0xff; + + /* first round, salt is salt */ + libssh2_sha512_init(&ctx); + libssh2_sha512_update(ctx, countsalt, saltlen + 4); + libssh2_sha512_final(ctx, sha2salt); + + bcrypt_hash(sha2pass, sha2salt, tmpout); + memcpy(out, tmpout, sizeof(out)); + + for(i = 1; i < rounds; i++) { + /* subsequent rounds, salt is previous output */ + libssh2_sha512_init(&ctx); + libssh2_sha512_update(ctx, tmpout, sizeof(tmpout)); + libssh2_sha512_final(ctx, sha2salt); + + bcrypt_hash(sha2pass, sha2salt, tmpout); + for(j = 0; j < sizeof(out); j++) + out[j] ^= tmpout[j]; + } + + /* + * pbkdf2 deviation: output the key material non-linearly. + */ + amt = MINIMUM(amt, keylen); + for(i = 0; i < amt; i++) { + size_t dest = i * stride + (count - 1); + if(dest >= origkeylen) { + break; + } + key[dest] = out[i]; + } + keylen -= i; + } + + /* zap */ + _libssh2_explicit_zero(out, sizeof(out)); + free(countsalt); + + return 0; +} +#endif /* HAVE_BCRYPT_PBKDF */ diff --git a/vendor/libssh2/src/blf.h b/vendor/libssh2/src/blf.h new file mode 100644 index 0000000000..5b7c8aae06 --- /dev/null +++ b/vendor/libssh2/src/blf.h @@ -0,0 +1,89 @@ +#ifndef __LIBSSH2_BLF_H +#define __LIBSSH2_BLF_H +/* $OpenBSD: blf.h,v 1.7 2007/03/14 17:59:41 grunk Exp $ */ +/* + * Blowfish - a fast block cipher designed by Bruce Schneier + * + * Copyright 1997 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Niels Provos. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if !defined(HAVE_BCRYPT_PBKDF) && !defined(HAVE_BLH_H) + +/* Schneier specifies a maximum key length of 56 bytes. + * This ensures that every key bit affects every cipher + * bit. However, the subkeys can hold up to 72 bytes. + * Warning: For normal blowfish encryption only 56 bytes + * of the key affect all cipherbits. 
+ */ + +#define BLF_N 16 /* Number of Subkeys */ +#define BLF_MAXKEYLEN ((BLF_N-2)*4) /* 448 bits */ +#define BLF_MAXUTILIZED ((BLF_N + 2)*4) /* 576 bits */ + +/* Blowfish context */ +typedef struct BlowfishContext { + uint32_t S[4][256]; /* S-Boxes */ + uint32_t P[BLF_N + 2]; /* Subkeys */ +} blf_ctx; + +/* Raw access to customized Blowfish + * blf_key is just: + * Blowfish_initstate( state ) + * Blowfish_expand0state( state, key, keylen ) + */ + +void Blowfish_encipher(blf_ctx *, uint32_t *, uint32_t *); +void Blowfish_decipher(blf_ctx *, uint32_t *, uint32_t *); +void Blowfish_initstate(blf_ctx *); +void Blowfish_expand0state(blf_ctx *, const uint8_t *, uint16_t); +void Blowfish_expandstate +(blf_ctx *, const uint8_t *, uint16_t, const uint8_t *, uint16_t); + +/* Standard Blowfish */ + +void blf_key(blf_ctx *, const uint8_t *, uint16_t); +void blf_enc(blf_ctx *, uint32_t *, uint16_t); +void blf_dec(blf_ctx *, uint32_t *, uint16_t); + +void blf_ecb_encrypt(blf_ctx *, uint8_t *, uint32_t); +void blf_ecb_decrypt(blf_ctx *, uint8_t *, uint32_t); + +void blf_cbc_encrypt(blf_ctx *, uint8_t *, uint8_t *, uint32_t); +void blf_cbc_decrypt(blf_ctx *, uint8_t *, uint8_t *, uint32_t); + +/* Converts uint8_t to uint32_t */ +uint32_t Blowfish_stream2word(const uint8_t *, uint16_t, uint16_t *); + +/* bcrypt with pbkd */ +int bcrypt_pbkdf(const char *pass, size_t passlen, const uint8_t *salt, + size_t saltlen, + uint8_t *key, size_t keylen, unsigned int rounds); + +#endif /* !defined(HAVE_BCRYPT_PBKDF) && !defined(HAVE_BLH_H) */ +#endif /* __LIBSSH2_BLF_H */ diff --git a/vendor/libssh2/src/blowfish.c b/vendor/libssh2/src/blowfish.c new file mode 100644 index 0000000000..4aefc66ac7 --- /dev/null +++ b/vendor/libssh2/src/blowfish.c @@ -0,0 +1,697 @@ +/* $OpenBSD: blowfish.c,v 1.18 2004/11/02 17:23:26 hshoexer Exp $ */ +/* + * Blowfish block cipher for OpenBSD + * Copyright 1997 Niels Provos + * All rights reserved. + * + * Implementation advice by David Mazieres . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Niels Provos. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This code is derived from section 14.3 and the given source + * in section V of Applied Cryptography, second edition. + * Blowfish is an unpatented fast block cipher designed by + * Bruce Schneier. 
+ */ + + +#if !defined(HAVE_BCRYPT_PBKDF) && (!defined(HAVE_BLOWFISH_INITSTATE) || \ + !defined(HAVE_BLOWFISH_EXPAND0STATE) || \ + !defined(HAVE_BLF_ENC)) + +#if 0 +#include /* used for debugging */ +#include +#endif + +#include + +#include "libssh2.h" +#include "blf.h" + +#undef inline +#ifdef __GNUC__ +#define inline __inline +#else /* !__GNUC__ */ +#define inline +#endif /* !__GNUC__ */ + +/* Function for Feistel Networks */ + +#define F(s, x) ((((s)[ (((x)>>24)&0xFF)] \ + + (s)[0x100 + (((x)>>16)&0xFF)]) \ + ^ (s)[0x200 + (((x)>> 8)&0xFF)]) \ + + (s)[0x300 + ( (x) &0xFF)]) + +#define BLFRND(s,p,i,j,n) (i ^= F(s,j) ^ (p)[n]) + +void +Blowfish_encipher(blf_ctx *c, uint32_t *xl, uint32_t *xr) +{ + uint32_t Xl; + uint32_t Xr; + uint32_t *s = c->S[0]; + uint32_t *p = c->P; + + Xl = *xl; + Xr = *xr; + + Xl ^= p[0]; + BLFRND(s, p, Xr, Xl, 1); BLFRND(s, p, Xl, Xr, 2); + BLFRND(s, p, Xr, Xl, 3); BLFRND(s, p, Xl, Xr, 4); + BLFRND(s, p, Xr, Xl, 5); BLFRND(s, p, Xl, Xr, 6); + BLFRND(s, p, Xr, Xl, 7); BLFRND(s, p, Xl, Xr, 8); + BLFRND(s, p, Xr, Xl, 9); BLFRND(s, p, Xl, Xr, 10); + BLFRND(s, p, Xr, Xl, 11); BLFRND(s, p, Xl, Xr, 12); + BLFRND(s, p, Xr, Xl, 13); BLFRND(s, p, Xl, Xr, 14); + BLFRND(s, p, Xr, Xl, 15); BLFRND(s, p, Xl, Xr, 16); + + *xl = Xr ^ p[17]; + *xr = Xl; +} + +void +Blowfish_decipher(blf_ctx *c, uint32_t *xl, uint32_t *xr) +{ + uint32_t Xl; + uint32_t Xr; + uint32_t *s = c->S[0]; + uint32_t *p = c->P; + + Xl = *xl; + Xr = *xr; + + Xl ^= p[17]; + BLFRND(s, p, Xr, Xl, 16); BLFRND(s, p, Xl, Xr, 15); + BLFRND(s, p, Xr, Xl, 14); BLFRND(s, p, Xl, Xr, 13); + BLFRND(s, p, Xr, Xl, 12); BLFRND(s, p, Xl, Xr, 11); + BLFRND(s, p, Xr, Xl, 10); BLFRND(s, p, Xl, Xr, 9); + BLFRND(s, p, Xr, Xl, 8); BLFRND(s, p, Xl, Xr, 7); + BLFRND(s, p, Xr, Xl, 6); BLFRND(s, p, Xl, Xr, 5); + BLFRND(s, p, Xr, Xl, 4); BLFRND(s, p, Xl, Xr, 3); + BLFRND(s, p, Xr, Xl, 2); BLFRND(s, p, Xl, Xr, 1); + + *xl = Xr ^ p[0]; + *xr = Xl; +} + +void +Blowfish_initstate(blf_ctx *c) +{ + /* P-box and S-box 
tables initialized with digits of Pi */ + + static const blf_ctx initstate = + { { + { + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, + 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, + 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, + 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, + 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, + 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, + 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, + 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, + 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, + 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, + 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, + 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, + 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, + 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, + 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, + 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, + 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, + 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, + 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, + 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, + 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, + 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, + 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, + 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, + 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, + 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, + 
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, + 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, + 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, + 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, + 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, + 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, + 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, + 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, + 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, + 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, + 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, + 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, + 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, + 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, + 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, + 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, + 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a}, + { + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, + 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, + 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, + 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, + 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, + 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, + 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, + 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, + 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, + 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 
0x9af3dda7, + 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, + 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, + 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, + 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, + 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, + 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, + 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, + 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, + 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, + 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, + 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, + 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, + 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, + 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, + 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, + 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, + 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, + 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, + 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, + 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, + 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, + 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, + 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, + 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, + 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, + 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, + 0x7f1524c3, 0x69cb7492, 0x47848a0b, 
0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, + 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, + 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, + 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, + 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, + 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, + 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7}, + { + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, + 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, + 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, + 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, + 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, + 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, + 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, + 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, + 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, + 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, + 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, + 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, + 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, + 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, + 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, + 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, + 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, + 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, + 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, + 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, + 0xa812dc60, 0xa1ebddf8, 
0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, + 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, + 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, + 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, + 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, + 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, + 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, + 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, + 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, + 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, + 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, + 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, + 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, + 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, + 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, + 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, + 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, + 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, + 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, + 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, + 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, + 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, + 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0}, + { + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, + 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, + 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, + 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, + 0x530ff8ee, 
0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, + 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, + 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, + 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, + 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, + 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, + 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, + 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, + 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, + 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, + 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, + 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, + 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, + 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, + 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, + 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, + 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, + 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, + 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, + 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, + 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, + 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, + 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, + 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, + 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, + 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, + 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 
0xe3056a0c, 0x10d25065, 0xcb03a442, + 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, + 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, + 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, + 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, + 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, + 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, + 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, + 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, + 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, + 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, + 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, + 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6} + }, + { + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, + 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, + 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, + 0x9216d5d9, 0x8979fb1b + } }; + + *c = initstate; +} + +uint32_t +Blowfish_stream2word(const uint8_t *data, uint16_t databytes, + uint16_t *current) +{ + uint8_t i; + uint16_t j; + uint32_t temp; + + temp = 0x00000000; + j = *current; + + for(i = 0; i < 4; i++, j++) { + if(j >= databytes) + j = 0; + temp = (temp << 8) | data[j]; + } + + *current = j; + return temp; +} + +void +Blowfish_expand0state(blf_ctx *c, const uint8_t *key, uint16_t keybytes) +{ + uint16_t i; + uint16_t j; + uint16_t k; + uint32_t temp; + uint32_t datal; + uint32_t datar; + + j = 0; + for(i = 0; i < BLF_N + 2; i++) { + /* Extract 4 int8 to 1 int32 from keystream */ + temp = Blowfish_stream2word(key, keybytes, &j); + c->P[i] = c->P[i] ^ temp; + } + + j = 0; + datal = 0x00000000; + datar = 0x00000000; + for(i = 0; i < BLF_N + 2; i += 2) { + Blowfish_encipher(c, &datal, 
&datar); + + c->P[i] = datal; + c->P[i + 1] = datar; + } + + for(i = 0; i < 4; i++) { + for(k = 0; k < 256; k += 2) { + Blowfish_encipher(c, &datal, &datar); + + c->S[i][k] = datal; + c->S[i][k + 1] = datar; + } + } +} + + +void +Blowfish_expandstate(blf_ctx *c, const uint8_t *data, uint16_t databytes, + const uint8_t *key, uint16_t keybytes) +{ + uint16_t i; + uint16_t j; + uint16_t k; + uint32_t temp; + uint32_t datal; + uint32_t datar; + + j = 0; + for(i = 0; i < BLF_N + 2; i++) { + /* Extract 4 int8 to 1 int32 from keystream */ + temp = Blowfish_stream2word(key, keybytes, &j); + c->P[i] = c->P[i] ^ temp; + } + + j = 0; + datal = 0x00000000; + datar = 0x00000000; + for(i = 0; i < BLF_N + 2; i += 2) { + datal ^= Blowfish_stream2word(data, databytes, &j); + datar ^= Blowfish_stream2word(data, databytes, &j); + Blowfish_encipher(c, &datal, &datar); + + c->P[i] = datal; + c->P[i + 1] = datar; + } + + for(i = 0; i < 4; i++) { + for(k = 0; k < 256; k += 2) { + datal ^= Blowfish_stream2word(data, databytes, &j); + datar ^= Blowfish_stream2word(data, databytes, &j); + Blowfish_encipher(c, &datal, &datar); + + c->S[i][k] = datal; + c->S[i][k + 1] = datar; + } + } + +} + +void +blf_key(blf_ctx *c, const uint8_t *k, uint16_t len) +{ + /* Initialize S-boxes and subkeys with Pi */ + Blowfish_initstate(c); + + /* Transform S-boxes and subkeys with key */ + Blowfish_expand0state(c, k, len); +} + +void +blf_enc(blf_ctx *c, uint32_t *data, uint16_t blocks) +{ + uint32_t *d; + uint16_t i; + + d = data; + for(i = 0; i < blocks; i++) { + Blowfish_encipher(c, d, d + 1); + d += 2; + } +} + +void +blf_dec(blf_ctx *c, uint32_t *data, uint16_t blocks) +{ + uint32_t *d; + uint16_t i; + + d = data; + for(i = 0; i < blocks; i++) { + Blowfish_decipher(c, d, d + 1); + d += 2; + } +} + +void +blf_ecb_encrypt(blf_ctx *c, uint8_t *data, uint32_t len) +{ + uint32_t l, r; + uint32_t i; + + for(i = 0; i < len; i += 8) { + l = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; + r = data[4] << 
24 | data[5] << 16 | data[6] << 8 | data[7]; + Blowfish_encipher(c, &l, &r); + data[0] = l >> 24 & 0xff; + data[1] = l >> 16 & 0xff; + data[2] = l >> 8 & 0xff; + data[3] = l & 0xff; + data[4] = r >> 24 & 0xff; + data[5] = r >> 16 & 0xff; + data[6] = r >> 8 & 0xff; + data[7] = r & 0xff; + data += 8; + } +} + +void +blf_ecb_decrypt(blf_ctx *c, uint8_t *data, uint32_t len) +{ + uint32_t l, r; + uint32_t i; + + for(i = 0; i < len; i += 8) { + l = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; + r = data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]; + Blowfish_decipher(c, &l, &r); + data[0] = l >> 24 & 0xff; + data[1] = l >> 16 & 0xff; + data[2] = l >> 8 & 0xff; + data[3] = l & 0xff; + data[4] = r >> 24 & 0xff; + data[5] = r >> 16 & 0xff; + data[6] = r >> 8 & 0xff; + data[7] = r & 0xff; + data += 8; + } +} + +void +blf_cbc_encrypt(blf_ctx *c, uint8_t *iv, uint8_t *data, uint32_t len) +{ + uint32_t l, r; + uint32_t i, j; + + for(i = 0; i < len; i += 8) { + for(j = 0; j < 8; j++) + data[j] ^= iv[j]; + l = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; + r = data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]; + Blowfish_encipher(c, &l, &r); + data[0] = l >> 24 & 0xff; + data[1] = l >> 16 & 0xff; + data[2] = l >> 8 & 0xff; + data[3] = l & 0xff; + data[4] = r >> 24 & 0xff; + data[5] = r >> 16 & 0xff; + data[6] = r >> 8 & 0xff; + data[7] = r & 0xff; + iv = data; + data += 8; + } +} + +void +blf_cbc_decrypt(blf_ctx *c, uint8_t *iva, uint8_t *data, uint32_t len) +{ + uint32_t l, r; + uint8_t *iv; + uint32_t i, j; + + iv = data + len - 16; + data = data + len - 8; + for(i = len - 8; i >= 8; i -= 8) { + l = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; + r = data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]; + Blowfish_decipher(c, &l, &r); + data[0] = l >> 24 & 0xff; + data[1] = l >> 16 & 0xff; + data[2] = l >> 8 & 0xff; + data[3] = l & 0xff; + data[4] = r >> 24 & 0xff; + data[5] = r >> 16 & 0xff; + data[6] = r >> 8 & 0xff; + data[7] = r 
& 0xff; + for(j = 0; j < 8; j++) + data[j] ^= iv[j]; + iv -= 8; + data -= 8; + } + l = data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; + r = data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]; + Blowfish_decipher(c, &l, &r); + data[0] = l >> 24 & 0xff; + data[1] = l >> 16 & 0xff; + data[2] = l >> 8 & 0xff; + data[3] = l & 0xff; + data[4] = r >> 24 & 0xff; + data[5] = r >> 16 & 0xff; + data[6] = r >> 8 & 0xff; + data[7] = r & 0xff; + for(j = 0; j < 8; j++) + data[j] ^= iva[j]; +} + +#if 0 +void +report(uint32_t data[], uint16_t len) +{ + uint16_t i; + for(i = 0; i < len; i += 2) + printf("Block %0hd: %08lx %08lx.\n", + i / 2, data[i], data[i + 1]); +} +void +main(void) +{ + + blf_ctx c; + char key[] = "AAAAA"; + char key2[] = "abcdefghijklmnopqrstuvwxyz"; + + uint32_t data[10]; + uint32_t data2[] = + {0x424c4f57l, 0x46495348l}; + + uint16_t i; + + /* First test */ + for(i = 0; i < 10; i++) + data[i] = i; + + blf_key(&c, (uint8_t *) key, 5); + blf_enc(&c, data, 5); + blf_dec(&c, data, 1); + blf_dec(&c, data + 2, 4); + printf("Should read as 0 - 9.\n"); + report(data, 10); + + /* Second test */ + blf_key(&c, (uint8_t *) key2, strlen(key2)); + blf_enc(&c, data2, 1); + printf("\nShould read as: 0x324ed0fe 0xf413a203.\n"); + report(data2, 2); + blf_dec(&c, data2, 1); + report(data2, 2); +} +#endif + +#endif /* !defined(HAVE_BCRYPT_PBKDF) && \ + (!defined(HAVE_BLOWFISH_INITSTATE) || \ + !defined(HAVE_BLOWFISH_EXPAND0STATE) || \ + '!defined(HAVE_BLF_ENC)) */ diff --git a/vendor/libssh2/src/channel.c b/vendor/libssh2/src/channel.c index 538a0ab0d9..78ed40e877 100644 --- a/vendor/libssh2/src/channel.c +++ b/vendor/libssh2/src/channel.c @@ -1,6 +1,6 @@ /* Copyright (c) 2004-2007 Sara Golemon * Copyright (c) 2005 Mikhail Gusarov - * Copyright (c) 2008-2014 by Daniel Stenberg + * Copyright (c) 2008-2019 by Daniel Stenberg * * All rights reserved. 
* @@ -66,8 +66,8 @@ _libssh2_channel_nextid(LIBSSH2_SESSION * session) channel = _libssh2_list_first(&session->channels); - while (channel) { - if (channel->local.id > id) { + while(channel) { + if(channel->local.id > id) { id = channel->local.id; } channel = _libssh2_list_next(&channel->node); @@ -100,7 +100,7 @@ _libssh2_channel_locate(LIBSSH2_SESSION *session, uint32_t channel_id) for(channel = _libssh2_list_first(&session->channels); channel; channel = _libssh2_list_next(&channel->node)) { - if (channel->local.id == channel_id) + if(channel->local.id == channel_id) return channel; } @@ -112,7 +112,7 @@ _libssh2_channel_locate(LIBSSH2_SESSION *session, uint32_t channel_id) for(channel = _libssh2_list_first(&l->queue); channel; channel = _libssh2_list_next(&channel->node)) { - if (channel->local.id == channel_id) + if(channel->local.id == channel_id) return channel; } } @@ -141,7 +141,7 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, unsigned char *s; int rc; - if (session->open_state == libssh2_NB_state_idle) { + if(session->open_state == libssh2_NB_state_idle) { session->open_channel = NULL; session->open_packet = NULL; session->open_data = NULL; @@ -159,7 +159,7 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, packet_size); session->open_channel = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_CHANNEL)); - if (!session->open_channel) { + if(!session->open_channel) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate space for channel data"); return NULL; @@ -167,7 +167,7 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, session->open_channel->channel_type_len = channel_type_len; session->open_channel->channel_type = LIBSSH2_ALLOC(session, channel_type_len); - if (!session->open_channel->channel_type) { + if(!session->open_channel->channel_type) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Failed allocating memory for channel type name"); LIBSSH2_FREE(session, 
session->open_channel); @@ -189,7 +189,7 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, s = session->open_packet = LIBSSH2_ALLOC(session, session->open_packet_len); - if (!session->open_packet) { + if(!session->open_packet) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate temporary space for packet"); goto channel_error; @@ -205,17 +205,17 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, session->open_state = libssh2_NB_state_created; } - if (session->open_state == libssh2_NB_state_created) { + if(session->open_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->open_packet, session->open_packet_len, message, message_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending channel-open request"); return NULL; } - else if (rc) { + else if(rc) { _libssh2_error(session, rc, "Unable to send channel-open request"); goto channel_error; @@ -224,21 +224,36 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, session->open_state = libssh2_NB_state_sent; } - if (session->open_state == libssh2_NB_state_sent) { + if(session->open_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &session->open_data, &session->open_data_len, 1, session->open_packet + 5 + channel_type_len, 4, &session->open_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); return NULL; - } else if (rc) { + } + else if(rc) { + _libssh2_error(session, rc, "Unexpected error"); + goto channel_error; + } + + if(session->open_data_len < 1) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); goto channel_error; } - if (session->open_data[0] == SSH_MSG_CHANNEL_OPEN_CONFIRMATION) { + if(session->open_data[0] == SSH_MSG_CHANNEL_OPEN_CONFIRMATION) { + + 
if(session->open_data_len < 17) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + goto channel_error; + } + session->open_channel->remote.id = _libssh2_ntohu32(session->open_data + 5); session->open_channel->local.window_size = @@ -265,12 +280,14 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, return session->open_channel; } - if (session->open_data[0] == SSH_MSG_CHANNEL_OPEN_FAILURE) { - unsigned int reason_code = _libssh2_ntohu32(session->open_data + 5); - switch (reason_code) { + if(session->open_data[0] == SSH_MSG_CHANNEL_OPEN_FAILURE) { + unsigned int reason_code = + _libssh2_ntohu32(session->open_data + 5); + switch(reason_code) { case SSH_OPEN_ADMINISTRATIVELY_PROHIBITED: _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_FAILURE, - "Channel open failure (administratively prohibited)"); + "Channel open failure " + "(administratively prohibited)"); break; case SSH_OPEN_CONNECT_FAILED: _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_FAILURE, @@ -293,15 +310,15 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, channel_error: - if (session->open_data) { + if(session->open_data) { LIBSSH2_FREE(session, session->open_data); session->open_data = NULL; } - if (session->open_packet) { + if(session->open_packet) { LIBSSH2_FREE(session, session->open_packet); session->open_packet = NULL; } - if (session->open_channel) { + if(session->open_channel) { unsigned char channel_id[4]; LIBSSH2_FREE(session, session->open_channel->channel_type); @@ -309,7 +326,7 @@ _libssh2_channel_open(LIBSSH2_SESSION * session, const char *channel_type, /* Clear out packets meant for this channel */ _libssh2_htonu32(channel_id, session->open_channel->local.id); - while ((_libssh2_packet_ask(session, SSH_MSG_CHANNEL_DATA, + while((_libssh2_packet_ask(session, SSH_MSG_CHANNEL_DATA, &session->open_data, &session->open_data_len, 1, channel_id, 4) >= 0) @@ -366,7 +383,7 @@ channel_direct_tcpip(LIBSSH2_SESSION * 
session, const char *host, LIBSSH2_CHANNEL *channel; unsigned char *s; - if (session->direct_state == libssh2_NB_state_idle) { + if(session->direct_state == libssh2_NB_state_idle) { session->direct_host_len = strlen(host); session->direct_shost_len = strlen(shost); /* host_len(4) + port(4) + shost_len(4) + sport(4) */ @@ -374,14 +391,15 @@ channel_direct_tcpip(LIBSSH2_SESSION * session, const char *host, session->direct_host_len + session->direct_shost_len + 16; _libssh2_debug(session, LIBSSH2_TRACE_CONN, - "Requesting direct-tcpip session to from %s:%d to %s:%d", + "Requesting direct-tcpip session from %s:%d to %s:%d", shost, sport, host, port); s = session->direct_message = LIBSSH2_ALLOC(session, session->direct_message_len); - if (!session->direct_message) { + if(!session->direct_message) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for direct-tcpip connection"); + "Unable to allocate memory for " + "direct-tcpip connection"); return NULL; } _libssh2_store_str(&s, host, session->direct_host_len); @@ -398,7 +416,7 @@ channel_direct_tcpip(LIBSSH2_SESSION * session, const char *host, session->direct_message, session->direct_message_len); - if (!channel && + if(!channel && libssh2_session_last_errno(session) == LIBSSH2_ERROR_EAGAIN) { /* The error code is still set to LIBSSH2_ERROR_EAGAIN, set our state to created to avoid re-creating the package on next invoke */ @@ -429,7 +447,8 @@ libssh2_channel_direct_tcpip_ex(LIBSSH2_SESSION *session, const char *host, return NULL; BLOCK_ADJUST_ERRNO(ptr, session, - channel_direct_tcpip(session, host, port, shost, sport)); + channel_direct_tcpip(session, host, port, + shost, sport)); return ptr; } @@ -450,7 +469,7 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, if(!host) host = "0.0.0.0"; - if (session->fwdLstn_state == libssh2_NB_state_idle) { + if(session->fwdLstn_state == libssh2_NB_state_idle) { session->fwdLstn_host_len = strlen(host); /* 14 = packet_type(1) + 
request_len(4) + want_replay(1) + host_len(4) + port(4) */ @@ -467,7 +486,7 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, s = session->fwdLstn_packet = LIBSSH2_ALLOC(session, session->fwdLstn_packet_len); - if (!session->fwdLstn_packet) { + if(!session->fwdLstn_packet) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for setenv packet"); return NULL; @@ -483,18 +502,18 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, session->fwdLstn_state = libssh2_NB_state_created; } - if (session->fwdLstn_state == libssh2_NB_state_created) { + if(session->fwdLstn_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->fwdLstn_packet, session->fwdLstn_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending global-request packet for " "forward listen request"); return NULL; } - else if (rc) { + else if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send global-request packet for forward " "listen request"); @@ -509,34 +528,36 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, session->fwdLstn_state = libssh2_NB_state_sent; } - if (session->fwdLstn_state == libssh2_NB_state_sent) { + if(session->fwdLstn_state == libssh2_NB_state_sent) { unsigned char *data; size_t data_len; rc = _libssh2_packet_requirev(session, reply_codes, &data, &data_len, 0, NULL, 0, &session->fwdLstn_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); return NULL; - } else if (rc) { + } + else if(rc || (data_len < 1)) { _libssh2_error(session, LIBSSH2_ERROR_PROTO, "Unknown"); session->fwdLstn_state = libssh2_NB_state_idle; return NULL; } - if (data[0] == SSH_MSG_REQUEST_SUCCESS) { + if(data[0] == SSH_MSG_REQUEST_SUCCESS) { LIBSSH2_LISTENER *listener; listener = 
LIBSSH2_CALLOC(session, sizeof(LIBSSH2_LISTENER)); - if (!listener) + if(!listener) _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for listener queue"); else { listener->host = LIBSSH2_ALLOC(session, session->fwdLstn_host_len + 1); - if (!listener->host) { + if(!listener->host) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for listener queue"); + "Unable to allocate memory " + "for listener queue"); LIBSSH2_FREE(session, listener); listener = NULL; } @@ -544,10 +565,11 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, listener->session = session; memcpy(listener->host, host, session->fwdLstn_host_len); listener->host[session->fwdLstn_host_len] = 0; - if (data_len >= 5 && !port) { + if(data_len >= 5 && !port) { listener->port = _libssh2_ntohu32(data + 1); _libssh2_debug(session, LIBSSH2_TRACE_CONN, - "Dynamic tcpip-forward port allocated: %d", + "Dynamic tcpip-forward port " + "allocated: %d", listener->port); } else @@ -559,7 +581,7 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, /* append this to the parent's list of listeners */ _libssh2_list_add(&session->listeners, &listener->node); - if (bound_port) { + if(bound_port) { *bound_port = listener->port; } } @@ -569,7 +591,7 @@ channel_forward_listen(LIBSSH2_SESSION * session, const char *host, session->fwdLstn_state = libssh2_NB_state_idle; return listener; } - else if (data[0] == SSH_MSG_REQUEST_FAILURE) { + else if(data[0] == SSH_MSG_REQUEST_FAILURE) { LIBSSH2_FREE(session, data); _libssh2_error(session, LIBSSH2_ERROR_REQUEST_DENIED, "Unable to complete request for forward-listen"); @@ -624,13 +646,13 @@ int _libssh2_channel_forward_cancel(LIBSSH2_LISTENER *listener) int rc; int retcode = 0; - if (listener->chanFwdCncl_state == libssh2_NB_state_idle) { + if(listener->chanFwdCncl_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Cancelling tcpip-forward session for %s:%d", 
listener->host, listener->port); s = packet = LIBSSH2_ALLOC(session, packet_len); - if (!packet) { + if(!packet) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for setenv packet"); return LIBSSH2_ERROR_ALLOC; @@ -645,19 +667,20 @@ int _libssh2_channel_forward_cancel(LIBSSH2_LISTENER *listener) _libssh2_store_u32(&s, listener->port); listener->chanFwdCncl_state = libssh2_NB_state_created; - } else { + } + else { packet = listener->chanFwdCncl_data; } - if (listener->chanFwdCncl_state == libssh2_NB_state_created) { + if(listener->chanFwdCncl_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, packet, packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending forward request"); listener->chanFwdCncl_data = packet; return rc; } - else if (rc) { + else if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send global-request packet for forward " "listen request"); @@ -673,11 +696,11 @@ int _libssh2_channel_forward_cancel(LIBSSH2_LISTENER *listener) } queued = _libssh2_list_first(&listener->queue); - while (queued) { + while(queued) { LIBSSH2_CHANNEL *next = _libssh2_list_next(&queued->node); rc = _libssh2_channel_free(queued); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } queued = next; @@ -725,9 +748,9 @@ channel_forward_accept(LIBSSH2_LISTENER *listener) do { rc = _libssh2_transport_read(listener->session); - } while (rc > 0); + } while(rc > 0); - if (_libssh2_list_first(&listener->queue)) { + if(_libssh2_list_first(&listener->queue)) { LIBSSH2_CHANNEL *channel = _libssh2_list_first(&listener->queue); /* detach channel from listener's queue */ @@ -741,7 +764,7 @@ channel_forward_accept(LIBSSH2_LISTENER *listener) return channel; } - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(listener->session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for 
packet"); } @@ -786,7 +809,7 @@ static int channel_setenv(LIBSSH2_CHANNEL *channel, size_t data_len; int rc; - if (channel->setenv_state == libssh2_NB_state_idle) { + if(channel->setenv_state == libssh2_NB_state_idle) { /* 21 = packet_type(1) + channel_id(4) + request_len(4) + * request(3)"env" + want_reply(1) + varname_len(4) + value_len(4) */ channel->setenv_packet_len = varname_len + value_len + 21; @@ -802,7 +825,7 @@ static int channel_setenv(LIBSSH2_CHANNEL *channel, s = channel->setenv_packet = LIBSSH2_ALLOC(session, channel->setenv_packet_len); - if (!channel->setenv_packet) { + if(!channel->setenv_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory " "for setenv packet"); @@ -818,16 +841,17 @@ static int channel_setenv(LIBSSH2_CHANNEL *channel, channel->setenv_state = libssh2_NB_state_created; } - if (channel->setenv_state == libssh2_NB_state_created) { + if(channel->setenv_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->setenv_packet, channel->setenv_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending setenv request"); return rc; - } else if (rc) { + } + else if(rc) { LIBSSH2_FREE(session, channel->setenv_packet); channel->setenv_packet = NULL; channel->setenv_state = libssh2_NB_state_idle; @@ -843,20 +867,25 @@ static int channel_setenv(LIBSSH2_CHANNEL *channel, channel->setenv_state = libssh2_NB_state_sent; } - if (channel->setenv_state == libssh2_NB_state_sent) { + if(channel->setenv_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &data, &data_len, 1, channel->setenv_local_channel, 4, &channel-> setenv_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } - if (rc) { + if(rc) { channel->setenv_state = libssh2_NB_state_idle; return rc; } + else if(data_len < 1) { + channel->setenv_state = 
libssh2_NB_state_idle; + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + } - if (data[0] == SSH_MSG_CHANNEL_SUCCESS) { + if(data[0] == SSH_MSG_CHANNEL_SUCCESS) { LIBSSH2_FREE(session, data); channel->setenv_state = libssh2_NB_state_idle; return 0; @@ -907,7 +936,7 @@ static int channel_request_pty(LIBSSH2_CHANNEL *channel, { SSH_MSG_CHANNEL_SUCCESS, SSH_MSG_CHANNEL_FAILURE, 0 }; int rc; - if (channel->reqPTY_state == libssh2_NB_state_idle) { + if(channel->reqPTY_state == libssh2_NB_state_idle) { /* 41 = packet_type(1) + channel(4) + pty_req_len(4) + "pty_req"(7) + * want_reply(1) + term_len(4) + width(4) + height(4) + width_px(4) + * height_px(4) + modes_len(4) */ @@ -944,15 +973,16 @@ static int channel_request_pty(LIBSSH2_CHANNEL *channel, channel->reqPTY_state = libssh2_NB_state_created; } - if (channel->reqPTY_state == libssh2_NB_state_created) { + if(channel->reqPTY_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->reqPTY_packet, channel->reqPTY_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending pty request"); return rc; - } else if (rc) { + } + else if(rc) { channel->reqPTY_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send pty-request packet"); @@ -962,16 +992,17 @@ static int channel_request_pty(LIBSSH2_CHANNEL *channel, channel->reqPTY_state = libssh2_NB_state_sent; } - if (channel->reqPTY_state == libssh2_NB_state_sent) { + if(channel->reqPTY_state == libssh2_NB_state_sent) { unsigned char *data; size_t data_len; unsigned char code; rc = _libssh2_packet_requirev(session, reply_codes, &data, &data_len, 1, channel->reqPTY_local_channel, 4, &channel->reqPTY_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc || data_len < 1) { channel->reqPTY_state = libssh2_NB_state_idle; return 
_libssh2_error(session, LIBSSH2_ERROR_PROTO, "Failed to require the PTY package"); @@ -982,12 +1013,165 @@ static int channel_request_pty(LIBSSH2_CHANNEL *channel, LIBSSH2_FREE(session, data); channel->reqPTY_state = libssh2_NB_state_idle; - if (code == SSH_MSG_CHANNEL_SUCCESS) + if(code == SSH_MSG_CHANNEL_SUCCESS) + return 0; + } + + return _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_REQUEST_DENIED, + "Unable to complete request for " + "channel request-pty"); +} + +/** + * channel_request_auth_agent + * The actual re-entrant method which requests an auth agent. + * */ +static int channel_request_auth_agent(LIBSSH2_CHANNEL *channel, + const char *request_str, + int request_str_len) +{ + LIBSSH2_SESSION *session = channel->session; + unsigned char *s; + static const unsigned char reply_codes[3] = + { SSH_MSG_CHANNEL_SUCCESS, SSH_MSG_CHANNEL_FAILURE, 0 }; + int rc; + + if(channel->req_auth_agent_state == libssh2_NB_state_idle) { + /* Only valid options are "auth-agent-req" and + * "auth-agent-req_at_openssh.com" so we make sure it is not + * actually longer than the longest possible. 
*/ + if(request_str_len > 26) { + return _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "request_str length too large"); + } + + /* + * Length: 24 or 36 = packet_type(1) + channel(4) + req_len(4) + + * request_str (variable) + want_reply (1) */ + channel->req_auth_agent_packet_len = 10 + request_str_len; + + /* Zero out the requireev state to reset */ + memset(&channel->req_auth_agent_requirev_state, 0, + sizeof(channel->req_auth_agent_requirev_state)); + + _libssh2_debug(session, LIBSSH2_TRACE_CONN, + "Requesting auth agent on channel %lu/%lu", + channel->local.id, channel->remote.id); + + /* + * byte SSH_MSG_CHANNEL_REQUEST + * uint32 recipient channel + * string "auth-agent-req" + * boolean want reply + * */ + s = channel->req_auth_agent_packet; + *(s++) = SSH_MSG_CHANNEL_REQUEST; + _libssh2_store_u32(&s, channel->remote.id); + _libssh2_store_str(&s, (char *)request_str, request_str_len); + *(s++) = 0x01; + + channel->req_auth_agent_state = libssh2_NB_state_created; + } + + if(channel->req_auth_agent_state == libssh2_NB_state_created) { + /* Send the packet, we can use sizeof() on the packet because it + * is always completely filled; there are no variable length fields. 
*/ + rc = _libssh2_transport_send(session, channel->req_auth_agent_packet, + channel->req_auth_agent_packet_len, + NULL, 0); + + if(rc == LIBSSH2_ERROR_EAGAIN) { + _libssh2_error(session, rc, + "Would block sending auth-agent request"); + } + else if(rc) { + channel->req_auth_agent_state = libssh2_NB_state_idle; + return _libssh2_error(session, rc, + "Unable to send auth-agent request"); + } + _libssh2_htonu32(channel->req_auth_agent_local_channel, + channel->local.id); + channel->req_auth_agent_state = libssh2_NB_state_sent; + } + + if(channel->req_auth_agent_state == libssh2_NB_state_sent) { + unsigned char *data; + size_t data_len; + unsigned char code; + + rc = _libssh2_packet_requirev( + session, reply_codes, &data, &data_len, 1, + channel->req_auth_agent_local_channel, + 4, &channel->req_auth_agent_requirev_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + channel->req_auth_agent_state = libssh2_NB_state_idle; + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Failed to request auth-agent"); + } + + code = data[0]; + + LIBSSH2_FREE(session, data); + channel->req_auth_agent_state = libssh2_NB_state_idle; + + if(code == SSH_MSG_CHANNEL_SUCCESS) return 0; } return _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_REQUEST_DENIED, - "Unable to complete request for channel request-pty"); + "Unable to complete request for auth-agent"); +} + +/** + * libssh2_channel_request_auth_agent + * Requests that agent forwarding be enabled for the session. The + * request must be sent over a specific channel, which starts the agent + * listener on the remote side. Once the channel is closed, the agent + * listener continues to exist. 
+ * */ +LIBSSH2_API int +libssh2_channel_request_auth_agent(LIBSSH2_CHANNEL *channel) +{ + int rc; + + if(!channel) + return LIBSSH2_ERROR_BAD_USE; + + /* The current RFC draft for agent forwarding says you're supposed to + * send "auth-agent-req," but most SSH servers out there right now + * actually expect "auth-agent-req@openssh.com", so we try that + * first. */ + if(channel->req_auth_agent_try_state == libssh2_NB_state_idle) { + BLOCK_ADJUST(rc, channel->session, + channel_request_auth_agent(channel, + "auth-agent-req@openssh.com", + 26)); + + /* If we failed (but not with EAGAIN), then we move onto + * the next step to try another request type. */ + if(rc != 0 && rc != LIBSSH2_ERROR_EAGAIN) + channel->req_auth_agent_try_state = libssh2_NB_state_sent; + } + + if(channel->req_auth_agent_try_state == libssh2_NB_state_sent) { + BLOCK_ADJUST(rc, channel->session, + channel_request_auth_agent(channel, + "auth-agent-req", 14)); + + /* If we failed without an EAGAIN, then move on with this + * state machine. */ + if(rc != 0 && rc != LIBSSH2_ERROR_EAGAIN) + channel->req_auth_agent_try_state = libssh2_NB_state_sent1; + } + + /* If things are good, reset the try state. 
*/ + if(rc == 0) + channel->req_auth_agent_try_state = libssh2_NB_state_idle; + + return rc; } /* @@ -1021,7 +1205,7 @@ channel_request_pty_size(LIBSSH2_CHANNEL * channel, int width, int rc; int retcode = LIBSSH2_ERROR_PROTO; - if (channel->reqPTY_state == libssh2_NB_state_idle) { + if(channel->reqPTY_state == libssh2_NB_state_idle) { channel->reqPTY_packet_len = 39; /* Zero the whole thing out */ @@ -1048,15 +1232,16 @@ channel_request_pty_size(LIBSSH2_CHANNEL * channel, int width, channel->reqPTY_state = libssh2_NB_state_created; } - if (channel->reqPTY_state == libssh2_NB_state_created) { + if(channel->reqPTY_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->reqPTY_packet, channel->reqPTY_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending window-change request"); return rc; - } else if (rc) { + } + else if(rc) { channel->reqPTY_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send window-change packet"); @@ -1106,7 +1291,7 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, auth_cookie ? strlen(auth_cookie) : LIBSSH2_X11_RANDOM_COOKIE_LEN; int rc; - if (channel->reqX11_state == libssh2_NB_state_idle) { + if(channel->reqX11_state == libssh2_NB_state_idle) { /* 30 = packet_type(1) + channel(4) + x11_req_len(4) + "x11-req"(7) + * want_reply(1) + single_cnx(1) + proto_len(4) + cookie_len(4) + * screen_num(4) */ @@ -1126,7 +1311,7 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, s = channel->reqX11_packet = LIBSSH2_ALLOC(session, channel->reqX11_packet_len); - if (!channel->reqX11_packet) { + if(!channel->reqX11_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for pty-request"); } @@ -1138,23 +1323,28 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, *(s++) = 0x01; /* want_reply */ *(s++) = single_connection ? 
0x01 : 0x00; - _libssh2_store_str(&s, auth_proto?auth_proto:"MIT-MAGIC-COOKIE-1", + _libssh2_store_str(&s, auth_proto ? auth_proto : "MIT-MAGIC-COOKIE-1", proto_len); _libssh2_store_u32(&s, cookie_len); - if (auth_cookie) { + if(auth_cookie) { memcpy(s, auth_cookie, cookie_len); - } else { + } + else { int i; /* note: the extra +1 below is necessary since the sprintf() loop will always write 3 bytes so the last one will write the trailing zero at the LIBSSH2_X11_RANDOM_COOKIE_LEN/2 border */ - unsigned char buffer[(LIBSSH2_X11_RANDOM_COOKIE_LEN / 2) +1]; + unsigned char buffer[(LIBSSH2_X11_RANDOM_COOKIE_LEN / 2) + 1]; - _libssh2_random(buffer, LIBSSH2_X11_RANDOM_COOKIE_LEN / 2); + if(_libssh2_random(buffer, LIBSSH2_X11_RANDOM_COOKIE_LEN / 2)) { + return _libssh2_error(session, LIBSSH2_ERROR_RANDGEN, + "Unable to get random bytes " + "for x11-req cookie"); + } for(i = 0; i < (LIBSSH2_X11_RANDOM_COOKIE_LEN / 2); i++) { - sprintf((char *)&s[i*2], "%02X", buffer[i]); + snprintf((char *)&s[i*2], 3, "%02X", buffer[i]); } } s += cookie_len; @@ -1163,16 +1353,16 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, channel->reqX11_state = libssh2_NB_state_created; } - if (channel->reqX11_state == libssh2_NB_state_created) { + if(channel->reqX11_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->reqX11_packet, channel->reqX11_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending X11-req packet"); return rc; } - if (rc) { + if(rc) { LIBSSH2_FREE(session, channel->reqX11_packet); channel->reqX11_packet = NULL; channel->reqX11_state = libssh2_NB_state_idle; @@ -1187,7 +1377,7 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, channel->reqX11_state = libssh2_NB_state_sent; } - if (channel->reqX11_state == libssh2_NB_state_sent) { + if(channel->reqX11_state == libssh2_NB_state_sent) { size_t data_len; unsigned char *data; 
unsigned char code; @@ -1195,9 +1385,10 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, rc = _libssh2_packet_requirev(session, reply_codes, &data, &data_len, 1, channel->reqX11_local_channel, 4, &channel->reqX11_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc || data_len < 1) { channel->reqX11_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "waiting for x11-req response packet"); @@ -1207,7 +1398,7 @@ channel_x11_req(LIBSSH2_CHANNEL *channel, int single_connection, LIBSSH2_FREE(session, data); channel->reqX11_state = libssh2_NB_state_idle; - if (code == SSH_MSG_CHANNEL_SUCCESS) + if(code == SSH_MSG_CHANNEL_SUCCESS) return 0; } @@ -1252,12 +1443,12 @@ _libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel, { SSH_MSG_CHANNEL_SUCCESS, SSH_MSG_CHANNEL_FAILURE, 0 }; int rc; - if (channel->process_state == libssh2_NB_state_end) { + if(channel->process_state == libssh2_NB_state_end) { return _libssh2_error(session, LIBSSH2_ERROR_BAD_USE, "Channel can not be reused"); } - if (channel->process_state == libssh2_NB_state_idle) { + if(channel->process_state == libssh2_NB_state_idle) { /* 10 = packet_type(1) + channel(4) + request_len(4) + want_reply(1) */ channel->process_packet_len = request_len + 10; @@ -1265,16 +1456,16 @@ _libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel, memset(&channel->process_packet_requirev_state, 0, sizeof(channel->process_packet_requirev_state)); - if (message) + if(message) channel->process_packet_len += + 4; _libssh2_debug(session, LIBSSH2_TRACE_CONN, "starting request(%s) on channel %lu/%lu, message=%s", request, channel->local.id, channel->remote.id, - message?message:""); + message ? 
message : ""); s = channel->process_packet = LIBSSH2_ALLOC(session, channel->process_packet_len); - if (!channel->process_packet) + if(!channel->process_packet) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory " "for channel-process request"); @@ -1284,23 +1475,23 @@ _libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel, _libssh2_store_str(&s, request, request_len); *(s++) = 0x01; - if (message) + if(message) _libssh2_store_u32(&s, message_len); channel->process_state = libssh2_NB_state_created; } - if (channel->process_state == libssh2_NB_state_created) { + if(channel->process_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->process_packet, channel->process_packet_len, (unsigned char *)message, message_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending channel request"); return rc; } - else if (rc) { + else if(rc) { LIBSSH2_FREE(session, channel->process_packet); channel->process_packet = NULL; channel->process_state = libssh2_NB_state_end; @@ -1315,16 +1506,17 @@ _libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel, channel->process_state = libssh2_NB_state_sent; } - if (channel->process_state == libssh2_NB_state_sent) { + if(channel->process_state == libssh2_NB_state_sent) { unsigned char *data; size_t data_len; unsigned char code; rc = _libssh2_packet_requirev(session, reply_codes, &data, &data_len, 1, channel->process_local_channel, 4, &channel->process_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc || data_len < 1) { channel->process_state = libssh2_NB_state_end; return _libssh2_error(session, rc, "Failed waiting for channel success"); @@ -1334,7 +1526,7 @@ _libssh2_channel_process_startup(LIBSSH2_CHANNEL *channel, LIBSSH2_FREE(session, data); channel->process_state = libssh2_NB_state_end; - if (code == 
SSH_MSG_CHANNEL_SUCCESS) + if(code == SSH_MSG_CHANNEL_SUCCESS) return 0; } @@ -1387,30 +1579,54 @@ libssh2_channel_set_blocking(LIBSSH2_CHANNEL * channel, int blocking) int _libssh2_channel_flush(LIBSSH2_CHANNEL *channel, int streamid) { - if (channel->flush_state == libssh2_NB_state_idle) { + if(channel->flush_state == libssh2_NB_state_idle) { LIBSSH2_PACKET *packet = _libssh2_list_first(&channel->session->packets); channel->flush_refund_bytes = 0; channel->flush_flush_bytes = 0; - while (packet) { + while(packet) { + unsigned char packet_type; LIBSSH2_PACKET *next = _libssh2_list_next(&packet->node); - unsigned char packet_type = packet->data[0]; - if (((packet_type == SSH_MSG_CHANNEL_DATA) - || (packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA)) - && (_libssh2_ntohu32(packet->data + 1) == channel->local.id)) { + if(packet->data_len < 1) { + packet = next; + _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, + "Unexpected packet length"); + continue; + } + + packet_type = packet->data[0]; + + if(((packet_type == SSH_MSG_CHANNEL_DATA) + || (packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA)) + && ((packet->data_len >= 5) + && (_libssh2_ntohu32(packet->data + 1) + == channel->local.id))) { /* It's our channel at least */ - long packet_stream_id = - (packet_type == SSH_MSG_CHANNEL_DATA) ? 
0 : - _libssh2_ntohu32(packet->data + 5); - if ((streamid == LIBSSH2_CHANNEL_FLUSH_ALL) + int packet_stream_id; + + if(packet_type == SSH_MSG_CHANNEL_DATA) { + packet_stream_id = 0; + } + else if(packet->data_len >= 9) { + packet_stream_id = _libssh2_ntohu32(packet->data + 5); + } + else { + channel->flush_state = libssh2_NB_state_idle; + return _libssh2_error(channel->session, + LIBSSH2_ERROR_PROTO, + "Unexpected packet length"); + } + + if((streamid == LIBSSH2_CHANNEL_FLUSH_ALL) || ((packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA) && ((streamid == LIBSSH2_CHANNEL_FLUSH_EXTENDED_DATA) || (streamid == packet_stream_id))) || ((packet_type == SSH_MSG_CHANNEL_DATA) && (streamid == 0))) { - int bytes_to_flush = packet->data_len - packet->data_head; + size_t bytes_to_flush = packet->data_len - + packet->data_head; _libssh2_debug(channel->session, LIBSSH2_TRACE_CONN, "Flushing %d bytes of data from stream " @@ -1438,13 +1654,12 @@ _libssh2_channel_flush(LIBSSH2_CHANNEL *channel, int streamid) channel->read_avail -= channel->flush_flush_bytes; channel->remote.window_size -= channel->flush_flush_bytes; - if (channel->flush_refund_bytes) { - int rc; - - rc = _libssh2_channel_receive_window_adjust(channel, - channel->flush_refund_bytes, - 1, NULL); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(channel->flush_refund_bytes) { + int rc = + _libssh2_channel_receive_window_adjust(channel, + channel->flush_refund_bytes, + 1, NULL); + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } @@ -1510,41 +1725,42 @@ libssh2_channel_get_exit_signal(LIBSSH2_CHANNEL *channel, { size_t namelen = 0; - if (channel) { + if(channel) { LIBSSH2_SESSION *session = channel->session; - if (channel->exit_signal) { + if(channel->exit_signal) { namelen = strlen(channel->exit_signal); - if (exitsignal) { - *exitsignal = LIBSSH2_ALLOC(session, namelen + 1); - if (!*exitsignal) { + if(exitsignal) { + *exitsignal = LIBSSH2_ALLOC(session, namelen + 1); + if(!*exitsignal) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, 
"Unable to allocate memory for signal name"); } memcpy(*exitsignal, channel->exit_signal, namelen); (*exitsignal)[namelen] = '\0'; } - if (exitsignal_len) + if(exitsignal_len) *exitsignal_len = namelen; - } else { - if (exitsignal) + } + else { + if(exitsignal) *exitsignal = NULL; - if (exitsignal_len) + if(exitsignal_len) *exitsignal_len = 0; } /* TODO: set error message and language tag */ - if (errmsg) + if(errmsg) *errmsg = NULL; - if (errmsg_len) + if(errmsg_len) *errmsg_len = 0; - if (langtag) + if(langtag) *langtag = NULL; - if (langtag_len) + if(langtag_len) *langtag_len = 0; } @@ -1571,8 +1787,8 @@ _libssh2_channel_receive_window_adjust(LIBSSH2_CHANNEL * channel, if(store) *store = channel->remote.window_size; - if (channel->adjust_state == libssh2_NB_state_idle) { - if (!force + if(channel->adjust_state == libssh2_NB_state_idle) { + if(!force && (adjustment + channel->adjust_queue < LIBSSH2_CHANNEL_MINADJUST)) { _libssh2_debug(channel->session, LIBSSH2_TRACE_CONN, @@ -1583,7 +1799,7 @@ _libssh2_channel_receive_window_adjust(LIBSSH2_CHANNEL * channel, return 0; } - if (!adjustment && !channel->adjust_queue) { + if(!adjustment && !channel->adjust_queue) { return 0; } @@ -1604,12 +1820,12 @@ _libssh2_channel_receive_window_adjust(LIBSSH2_CHANNEL * channel, rc = _libssh2_transport_send(channel->session, channel->adjust_adjust, 9, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(channel->session, rc, "Would block sending window adjust"); return rc; } - else if (rc) { + else if(rc) { channel->adjust_queue = adjustment; return _libssh2_error(channel->session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send transfer-window adjustment " @@ -1654,7 +1870,7 @@ libssh2_channel_receive_window_adjust(LIBSSH2_CHANNEL *channel, /* stupid - but this is how it was made to work before and this is just kept for backwards compatibility */ - return rc?(unsigned long)rc:window; + return rc ? 
(unsigned long)rc : window; } /* @@ -1688,7 +1904,7 @@ libssh2_channel_receive_window_adjust2(LIBSSH2_CHANNEL *channel, int _libssh2_channel_extended_data(LIBSSH2_CHANNEL *channel, int ignore_mode) { - if (channel->extData2_state == libssh2_NB_state_idle) { + if(channel->extData2_state == libssh2_NB_state_idle) { _libssh2_debug(channel->session, LIBSSH2_TRACE_CONN, "Setting channel %lu/%lu handle_extended_data" " mode to %d", @@ -1698,8 +1914,8 @@ _libssh2_channel_extended_data(LIBSSH2_CHANNEL *channel, int ignore_mode) channel->extData2_state = libssh2_NB_state_created; } - if (channel->extData2_state == libssh2_NB_state_idle) { - if (ignore_mode == LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE) { + if(channel->extData2_state == libssh2_NB_state_idle) { + if(ignore_mode == LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE) { int rc = _libssh2_channel_flush(channel, LIBSSH2_CHANNEL_FLUSH_EXTENDED_DATA); @@ -1766,8 +1982,8 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, { LIBSSH2_SESSION *session = channel->session; int rc; - int bytes_read = 0; - int bytes_want; + size_t bytes_read = 0; + size_t bytes_want; int unlink_packet; LIBSSH2_PACKET *read_packet; LIBSSH2_PACKET *read_next; @@ -1779,11 +1995,13 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, stream_id); /* expand the receiving window first if it has become too narrow */ - if( (channel->read_state == libssh2_NB_state_jump1) || - (channel->remote.window_size < channel->remote.window_size_initial / 4 * 3 + buflen) ) { + if((channel->read_state == libssh2_NB_state_jump1) || + (channel->remote.window_size < + channel->remote.window_size_initial / 4 * 3 + buflen) ) { - uint32_t adjustment = channel->remote.window_size_initial + buflen - channel->remote.window_size; - if (adjustment < LIBSSH2_CHANNEL_MINADJUST) + uint32_t adjustment = channel->remote.window_size_initial + buflen - + channel->remote.window_size; + if(adjustment < LIBSSH2_CHANNEL_MINADJUST) adjustment = 
LIBSSH2_CHANNEL_MINADJUST; /* the actual window adjusting may not finish so we need to deal with @@ -1791,7 +2009,7 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, channel->read_state = libssh2_NB_state_jump1; rc = _libssh2_channel_receive_window_adjust(channel, adjustment, 0, NULL); - if (rc) + if(rc) return rc; channel->read_state = libssh2_NB_state_idle; @@ -1801,13 +2019,13 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, produces faster transfers. */ do { rc = _libssh2_transport_read(session); - } while (rc > 0); + } while(rc > 0); - if ((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) + if((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) return _libssh2_error(session, rc, "transport read"); read_packet = _libssh2_list_first(&session->packets); - while (read_packet && (bytes_read < (int) buflen)) { + while(read_packet && (bytes_read < buflen)) { /* previously this loop condition also checked for !channel->remote.close but we cannot let it do this: @@ -1821,6 +2039,13 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, /* In case packet gets destroyed during this iteration */ read_next = _libssh2_list_next(&readpkt->node); + if(readpkt->data_len < 5) { + read_packet = read_next; + _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, + "Unexpected packet length"); + continue; + } + channel->read_local_id = _libssh2_ntohu32(readpkt->data + 1); @@ -1831,9 +2056,10 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, * or the standard stream with extended_data_merge * enabled and data was available */ - if ((stream_id + if((stream_id && (readpkt->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA) && (channel->local.id == channel->read_local_id) + && (readpkt->data_len >= 9) && (stream_id == (int) _libssh2_ntohu32(readpkt->data + 5))) || (!stream_id && (readpkt->data[0] == SSH_MSG_CHANNEL_DATA) && (channel->local.id == channel->read_local_id)) @@ -1847,7 +2073,7 @@ ssize_t 
_libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, bytes_want = buflen - bytes_read; unlink_packet = FALSE; - if (bytes_want >= (int) (readpkt->data_len - readpkt->data_head)) { + if(bytes_want >= (readpkt->data_len - readpkt->data_head)) { /* we want more than this node keeps, so adjust the number and delete this node after the copy */ bytes_want = readpkt->data_len - readpkt->data_head; @@ -1869,7 +2095,7 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, bytes_read += bytes_want; /* if drained, remove from list */ - if (unlink_packet) { + if(unlink_packet) { /* detach readpkt from session->packets list */ _libssh2_list_remove(&readpkt->node); @@ -1882,7 +2108,7 @@ ssize_t _libssh2_channel_read(LIBSSH2_CHANNEL *channel, int stream_id, read_packet = read_next; } - if (!bytes_read) { + if(!bytes_read) { /* If the channel is already at EOF or even closed, we need to signal that back. We may have gotten that info while draining the incoming transport layer until EAGAIN so we must not be fooled by that @@ -1950,13 +2176,24 @@ _libssh2_channel_packet_data_len(LIBSSH2_CHANNEL * channel, int stream_id) { LIBSSH2_SESSION *session = channel->session; LIBSSH2_PACKET *read_packet; + LIBSSH2_PACKET *next_packet; uint32_t read_local_id; read_packet = _libssh2_list_first(&session->packets); - if (read_packet == NULL) + if(read_packet == NULL) return 0; - while (read_packet) { + while(read_packet) { + + next_packet = _libssh2_list_next(&read_packet->node); + + if(read_packet->data_len < 5) { + read_packet = next_packet; + _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, + "Unexpected packet length"); + continue; + } + read_local_id = _libssh2_ntohu32(read_packet->data + 1); /* @@ -1966,9 +2203,10 @@ _libssh2_channel_packet_data_len(LIBSSH2_CHANNEL * channel, int stream_id) * or the standard stream with extended_data_merge * enabled and data was available */ - if ((stream_id + if((stream_id && (read_packet->data[0] == 
SSH_MSG_CHANNEL_EXTENDED_DATA) && (channel->local.id == read_local_id) + && (read_packet->data_len >= 9) && (stream_id == (int) _libssh2_ntohu32(read_packet->data + 5))) || (!stream_id @@ -1979,11 +2217,11 @@ _libssh2_channel_packet_data_len(LIBSSH2_CHANNEL * channel, int stream_id) && (read_packet->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA) && (channel->local.id == read_local_id) && (channel->remote.extended_data_ignore_mode - == LIBSSH2_CHANNEL_EXTENDED_DATA_MERGE))) - { + == LIBSSH2_CHANNEL_EXTENDED_DATA_MERGE))) { return (read_packet->data_len - read_packet->data_head); } - read_packet = _libssh2_list_next(&read_packet->node); + + read_packet = next_packet; } return 0; @@ -2017,7 +2255,7 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, if(buflen > 32700) buflen = 32700; - if (channel->write_state == libssh2_NB_state_idle) { + if(channel->write_state == libssh2_NB_state_idle) { unsigned char *s = channel->write_packet; _libssh2_debug(channel->session, LIBSSH2_TRACE_CONN, @@ -2025,11 +2263,11 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, (int) buflen, channel->local.id, channel->remote.id, stream_id); - if (channel->local.close) + if(channel->local.close) return _libssh2_error(channel->session, LIBSSH2_ERROR_CHANNEL_CLOSED, "We've already closed this channel"); - else if (channel->local.eof) + else if(channel->local.eof) return _libssh2_error(channel->session, LIBSSH2_ERROR_CHANNEL_EOF_SENT, "EOF has already been received, " @@ -2039,7 +2277,7 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, * pending window adjust packets */ do rc = _libssh2_transport_read(session); - while (rc > 0); + while(rc > 0); if((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) { return _libssh2_error(channel->session, rc, @@ -2055,7 +2293,7 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, */ session->socket_block_directions = LIBSSH2_SESSION_BLOCK_INBOUND; - return (rc==LIBSSH2_ERROR_EAGAIN?rc:0); + return (rc == 
LIBSSH2_ERROR_EAGAIN?rc:0); } channel->write_bufwrite = buflen; @@ -2063,12 +2301,12 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, *(s++) = stream_id ? SSH_MSG_CHANNEL_EXTENDED_DATA : SSH_MSG_CHANNEL_DATA; _libssh2_store_u32(&s, channel->remote.id); - if (stream_id) + if(stream_id) _libssh2_store_u32(&s, stream_id); /* Don't exceed the remote end's limits */ /* REMEMBER local means local as the SOURCE of the data */ - if (channel->write_bufwrite > channel->local.window_size) { + if(channel->write_bufwrite > channel->local.window_size) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Splitting write block due to %lu byte " "window_size on %lu/%lu/%d", @@ -2076,7 +2314,7 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, channel->remote.id, stream_id); channel->write_bufwrite = channel->local.window_size; } - if (channel->write_bufwrite > channel->local.packet_size) { + if(channel->write_bufwrite > channel->local.packet_size) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Splitting write block due to %lu byte " "packet_size on %lu/%lu/%d", @@ -2097,15 +2335,15 @@ _libssh2_channel_write(LIBSSH2_CHANNEL *channel, int stream_id, channel->write_state = libssh2_NB_state_created; } - if (channel->write_state == libssh2_NB_state_created) { + if(channel->write_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->write_packet, channel->write_packet_len, buf, channel->write_bufwrite); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return _libssh2_error(session, rc, "Unable to send channel data"); } - else if (rc) { + else if(rc) { channel->write_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send channel data"); @@ -2164,17 +2402,18 @@ static int channel_send_eof(LIBSSH2_CHANNEL *channel) unsigned char packet[5]; /* packet_type(1) + channelno(4) */ int rc; - _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Sending EOF on channel %lu/%lu", + 
_libssh2_debug(session, LIBSSH2_TRACE_CONN, + "Sending EOF on channel %lu/%lu", channel->local.id, channel->remote.id); packet[0] = SSH_MSG_CHANNEL_EOF; _libssh2_htonu32(packet + 1, channel->remote.id); rc = _libssh2_transport_send(session, packet, 5, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending EOF"); return rc; } - else if (rc) { + else if(rc) { return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send EOF on channel"); } @@ -2210,6 +2449,7 @@ libssh2_channel_eof(LIBSSH2_CHANNEL * channel) { LIBSSH2_SESSION *session; LIBSSH2_PACKET *packet; + LIBSSH2_PACKET *next_packet; if(!channel) return LIBSSH2_ERROR_BAD_USE; @@ -2217,14 +2457,25 @@ libssh2_channel_eof(LIBSSH2_CHANNEL * channel) session = channel->session; packet = _libssh2_list_first(&session->packets); - while (packet) { - if (((packet->data[0] == SSH_MSG_CHANNEL_DATA) - || (packet->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA)) - && (channel->local.id == _libssh2_ntohu32(packet->data + 1))) { + while(packet) { + + next_packet = _libssh2_list_next(&packet->node); + + if(packet->data_len < 1) { + packet = next_packet; + _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, + "Unexpected packet length"); + continue; + } + + if(((packet->data[0] == SSH_MSG_CHANNEL_DATA) + || (packet->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA)) + && ((packet->data_len >= 5) + && (channel->local.id == _libssh2_ntohu32(packet->data + 1)))) { /* There's data waiting to be read yet, mask the EOF status */ return 0; } - packet = _libssh2_list_next(&packet->node); + packet = next_packet; } return channel->remote.eof; @@ -2240,9 +2491,9 @@ static int channel_wait_eof(LIBSSH2_CHANNEL *channel) LIBSSH2_SESSION *session = channel->session; int rc; - if (channel->wait_eof_state == libssh2_NB_state_idle) { + if(channel->wait_eof_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, - "Awaiting close of channel %lu/%lu", 
channel->local.id, + "Awaiting EOF for channel %lu/%lu", channel->local.id, channel->remote.id); channel->wait_eof_state = libssh2_NB_state_created; @@ -2253,19 +2504,26 @@ static int channel_wait_eof(LIBSSH2_CHANNEL *channel) * Either the EOF will be set or network timeout will occur. */ do { - if (channel->remote.eof) { + if(channel->remote.eof) { break; } + + if((channel->remote.window_size == channel->read_avail) && + session->api_block_mode) + return _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_WINDOW_FULL, + "Receiving channel window " + "has been exhausted"); + rc = _libssh2_transport_read(session); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } - else if (rc < 0) { + else if(rc < 0) { channel->wait_eof_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "_libssh2_transport_read() bailed out!"); } - } while (1); + } while(1); channel->wait_eof_state = libssh2_NB_state_idle; @@ -2294,16 +2552,17 @@ int _libssh2_channel_close(LIBSSH2_CHANNEL * channel) LIBSSH2_SESSION *session = channel->session; int rc = 0; - if (channel->local.close) { + if(channel->local.close) { /* Already closed, act like we sent another close, * even though we didn't... shhhhhh */ channel->close_state = libssh2_NB_state_idle; return 0; } - if (!channel->local.eof) { - if ((rc = channel_send_eof(channel))) { - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(!channel->local.eof) { + rc = channel_send_eof(channel); + if(rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } _libssh2_error(session, rc, @@ -2314,7 +2573,7 @@ int _libssh2_channel_close(LIBSSH2_CHANNEL * channel) /* ignore if we have received a remote eof or not, as it is now too late for us to wait for it. Continue closing! 
*/ - if (channel->close_state == libssh2_NB_state_idle) { + if(channel->close_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Closing channel %lu/%lu", channel->local.id, channel->remote.id); @@ -2324,41 +2583,43 @@ int _libssh2_channel_close(LIBSSH2_CHANNEL * channel) channel->close_state = libssh2_NB_state_created; } - if (channel->close_state == libssh2_NB_state_created) { + if(channel->close_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, channel->close_packet, 5, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, rc, "Would block sending close-channel"); return rc; - } else if (rc) { + } + else if(rc) { _libssh2_error(session, rc, "Unable to send close-channel request, " "but closing anyway"); /* skip waiting for the response and fall through to LIBSSH2_CHANNEL_CLOSE below */ - } else + } + else channel->close_state = libssh2_NB_state_sent; } - if (channel->close_state == libssh2_NB_state_sent) { + if(channel->close_state == libssh2_NB_state_sent) { /* We must wait for the remote SSH_MSG_CHANNEL_CLOSE message */ - while (!channel->remote.close && !rc && + while(!channel->remote.close && !rc && (session->socket_state != LIBSSH2_SOCKET_DISCONNECTED)) rc = _libssh2_transport_read(session); } if(rc != LIBSSH2_ERROR_EAGAIN) { - /* set the local close state first when we're perfectly confirmed to not - do any more EAGAINs */ + /* set the local close state first when we're perfectly confirmed to + not do any more EAGAINs */ channel->local.close = 1; /* We call the callback last in this function to make it keep the local data as long as EAGAIN is returned. */ - if (channel->close_cb) { + if(channel->close_cb) { LIBSSH2_CHANNEL_CLOSE(session, channel); } @@ -2366,7 +2627,7 @@ int _libssh2_channel_close(LIBSSH2_CHANNEL * channel) } /* return 0 or an error */ - return rc>=0?0:rc; + return rc >= 0 ? 
0 : rc; } /* @@ -2396,13 +2657,13 @@ static int channel_wait_closed(LIBSSH2_CHANNEL *channel) LIBSSH2_SESSION *session = channel->session; int rc; - if (!libssh2_channel_eof(channel)) { + if(!channel->remote.eof) { return _libssh2_error(session, LIBSSH2_ERROR_INVAL, "libssh2_channel_wait_closed() invoked when " "channel is not in EOF state"); } - if (channel->wait_closed_state == libssh2_NB_state_idle) { + if(channel->wait_closed_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Awaiting close of channel %lu/%lu", channel->local.id, channel->remote.id); @@ -2414,13 +2675,13 @@ static int channel_wait_closed(LIBSSH2_CHANNEL *channel) * While channel is not closed, read more packets from the network. * Either the channel will be closed or network timeout will occur. */ - if (!channel->remote.close) { + if(!channel->remote.close) { do { rc = _libssh2_transport_read(session); - if (channel->remote.close) + if(channel->remote.close) /* it is now closed, move on! */ break; - } while (rc > 0); + } while(rc > 0); if(rc < 0) return rc; } @@ -2465,7 +2726,7 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) assert(session); - if (channel->free_state == libssh2_NB_state_idle) { + if(channel->free_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Freeing channel %lu/%lu resources", channel->local.id, channel->remote.id); @@ -2474,7 +2735,7 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) } /* Allow channel freeing even when the socket has lost its connection */ - if (!channel->local.close + if(!channel->local.close && (session->socket_state == LIBSSH2_SOCKET_CONNECTED)) { rc = _libssh2_channel_close(channel); @@ -2487,7 +2748,7 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) channel->free_state = libssh2_NB_state_idle; - if (channel->exit_signal) { + if(channel->exit_signal) { LIBSSH2_FREE(session, channel->exit_signal); } @@ -2499,7 +2760,7 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) /* 
Clear out packets meant for this channel */ _libssh2_htonu32(channel_id, channel->local.id); - while ((_libssh2_packet_ask(session, SSH_MSG_CHANNEL_DATA, &data, + while((_libssh2_packet_ask(session, SSH_MSG_CHANNEL_DATA, &data, &data_len, 1, channel_id, 4) >= 0) || (_libssh2_packet_ask(session, SSH_MSG_CHANNEL_EXTENDED_DATA, &data, @@ -2508,7 +2769,7 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) } /* free "channel_type" */ - if (channel->channel_type) { + if(channel->channel_type) { LIBSSH2_FREE(session, channel->channel_type); } @@ -2518,13 +2779,13 @@ int _libssh2_channel_free(LIBSSH2_CHANNEL *channel) /* * Make sure all memory used in the state variables are free */ - if (channel->setenv_packet) { + if(channel->setenv_packet) { LIBSSH2_FREE(session, channel->setenv_packet); } - if (channel->reqX11_packet) { + if(channel->reqX11_packet) { LIBSSH2_FREE(session, channel->reqX11_packet); } - if (channel->process_packet) { + if(channel->process_packet) { LIBSSH2_FREE(session, channel->process_packet); } @@ -2569,25 +2830,38 @@ libssh2_channel_window_read_ex(LIBSSH2_CHANNEL *channel, if(!channel) return 0; /* no channel, no window! 
*/ - if (window_size_initial) { + if(window_size_initial) { *window_size_initial = channel->remote.window_size_initial; } - if (read_avail) { + if(read_avail) { size_t bytes_queued = 0; + LIBSSH2_PACKET *next_packet; LIBSSH2_PACKET *packet = _libssh2_list_first(&channel->session->packets); - while (packet) { - unsigned char packet_type = packet->data[0]; + while(packet) { + unsigned char packet_type; + next_packet = _libssh2_list_next(&packet->node); + + if(packet->data_len < 1) { + packet = next_packet; + _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, + "Unexpected packet length"); + continue; + } + + packet_type = packet->data[0]; - if (((packet_type == SSH_MSG_CHANNEL_DATA) - || (packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA)) - && (_libssh2_ntohu32(packet->data + 1) == channel->local.id)) { + if(((packet_type == SSH_MSG_CHANNEL_DATA) + || (packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA)) + && ((packet->data_len >= 5) + && (_libssh2_ntohu32(packet->data + 1) == + channel->local.id))) { bytes_queued += packet->data_len - packet->data_head; } - packet = _libssh2_list_next(&packet->node); + packet = next_packet; } *read_avail = bytes_queued; @@ -2611,7 +2885,7 @@ libssh2_channel_window_write_ex(LIBSSH2_CHANNEL *channel, if(!channel) return 0; /* no channel, no window! */ - if (window_size_initial) { + if(window_size_initial) { /* For locally initiated channels this is very often 0, so it's not * *that* useful as information goes */ *window_size_initial = channel->local.window_size_initial; diff --git a/vendor/libssh2/src/comp.c b/vendor/libssh2/src/comp.c index 4560188bb7..90ab30c89d 100644 --- a/vendor/libssh2/src/comp.c +++ b/vendor/libssh2/src/comp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2004-2007, Sara Golemon +/* Copyright (c) 2004-2007, 2019, Sara Golemon * Copyright (c) 2010-2014, Daniel Stenberg * All rights reserved. 
* @@ -38,7 +38,8 @@ #include "libssh2_priv.h" #ifdef LIBSSH2_HAVE_ZLIB -# include +#include +#undef compress /* dodge name clash with ZLIB macro */ #endif #include "comp.h" @@ -142,7 +143,7 @@ comp_method_zlib_init(LIBSSH2_SESSION * session, int compr, int status; strm = LIBSSH2_CALLOC(session, sizeof(z_stream)); - if (!strm) { + if(!strm) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "zlib compression/decompression"); @@ -151,15 +152,16 @@ comp_method_zlib_init(LIBSSH2_SESSION * session, int compr, strm->opaque = (voidpf) session; strm->zalloc = (alloc_func) comp_method_zlib_alloc; strm->zfree = (free_func) comp_method_zlib_free; - if (compr) { + if(compr) { /* deflate */ status = deflateInit(strm, Z_DEFAULT_COMPRESSION); - } else { + } + else { /* inflate */ status = inflateInit(strm); } - if (status != Z_OK) { + if(status != Z_OK) { LIBSSH2_FREE(session, strm); _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "unhandled zlib error %d", status); @@ -197,13 +199,14 @@ comp_method_zlib_comp(LIBSSH2_SESSION *session, status = deflate(strm, Z_PARTIAL_FLUSH); - if ((status == Z_OK) && (strm->avail_out > 0)) { + if((status == Z_OK) && (strm->avail_out > 0)) { *dest_len = out_maxlen - strm->avail_out; return 0; } _libssh2_debug(session, LIBSSH2_TRACE_TRANS, - "unhandled zlib compression error %d, avail_out", status, strm->avail_out); + "unhandled zlib compression error %d, avail_out", + status, strm->avail_out); return _libssh2_error(session, LIBSSH2_ERROR_ZLIB, "compression failure"); } @@ -224,18 +227,23 @@ comp_method_zlib_decomp(LIBSSH2_SESSION * session, /* A short-term alloc of a full data chunk is better than a series of reallocs */ char *out; - int out_maxlen = 4 * src_len; + size_t out_maxlen = src_len; + + if(src_len <= SIZE_MAX / 4) + out_maxlen = src_len * 4; + else + out_maxlen = payload_limit; /* If strm is null, then we have not yet been initialized. 
*/ - if (strm == NULL) + if(strm == NULL) return _libssh2_error(session, LIBSSH2_ERROR_COMPRESS, "decompression uninitialized");; /* In practice they never come smaller than this */ - if (out_maxlen < 25) + if(out_maxlen < 25) out_maxlen = 25; - if (out_maxlen > (int) payload_limit) + if(out_maxlen > payload_limit) out_maxlen = payload_limit; strm->next_in = (unsigned char *) src; @@ -243,26 +251,29 @@ comp_method_zlib_decomp(LIBSSH2_SESSION * session, strm->next_out = (unsigned char *) LIBSSH2_ALLOC(session, out_maxlen); out = (char *) strm->next_out; strm->avail_out = out_maxlen; - if (!strm->next_out) + if(!strm->next_out) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate decompression buffer"); /* Loop until it's all inflated or hit error */ - for (;;) { + for(;;) { int status; size_t out_ofs; char *newout; status = inflate(strm, Z_PARTIAL_FLUSH); - if (status == Z_OK) { - if (strm->avail_out > 0) - /* status is OK and the output buffer has not been exhausted so we're done */ + if(status == Z_OK) { + if(strm->avail_out > 0) + /* status is OK and the output buffer has not been exhausted + so we're done */ break; - } else if (status == Z_BUF_ERROR) { + } + else if(status == Z_BUF_ERROR) { /* the input data has been exhausted so we are done */ break; - } else { + } + else { /* error state */ LIBSSH2_FREE(session, out); _libssh2_debug(session, LIBSSH2_TRACE_TRANS, @@ -271,7 +282,7 @@ comp_method_zlib_decomp(LIBSSH2_SESSION * session, "decompression failure"); } - if (out_maxlen >= (int) payload_limit) { + if(out_maxlen > payload_limit || out_maxlen > SIZE_MAX / 2) { LIBSSH2_FREE(session, out); return _libssh2_error(session, LIBSSH2_ERROR_ZLIB, "Excessive growth in decompression phase"); @@ -281,7 +292,7 @@ comp_method_zlib_decomp(LIBSSH2_SESSION * session, out_ofs = out_maxlen - strm->avail_out; out_maxlen *= 2; newout = LIBSSH2_REALLOC(session, out, out_maxlen); - if (!newout) { + if(!newout) { LIBSSH2_FREE(session, out); return 
_libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to expand decompression buffer"); @@ -306,8 +317,8 @@ comp_method_zlib_dtor(LIBSSH2_SESSION *session, int compr, void **abstract) { z_stream *strm = *abstract; - if (strm) { - if (compr) + if(strm) { + if(compr) deflateEnd(strm); else inflateEnd(strm); diff --git a/vendor/libssh2/src/comp.h b/vendor/libssh2/src/comp.h index 8edc150299..82ac2dc958 100644 --- a/vendor/libssh2/src/comp.h +++ b/vendor/libssh2/src/comp.h @@ -1,6 +1,5 @@ #ifndef __LIBSSH2_COMP_H #define __LIBSSH2_COMP_H - /* Copyright (C) 2009-2010 by Daniel Stenberg * * Redistribution and use in source and binary forms, diff --git a/vendor/libssh2/src/crypt.c b/vendor/libssh2/src/crypt.c index 931ae8b805..8d493b4847 100644 --- a/vendor/libssh2/src/crypt.c +++ b/vendor/libssh2/src/crypt.c @@ -53,10 +53,11 @@ crypt_none_crypt(LIBSSH2_SESSION * session, unsigned char *buf, static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_none = { "none", - 8, /* blocksize (SSH2 defines minimum blocksize as 8) */ - 0, /* iv_len */ - 0, /* secret_len */ - 0, /* flags */ + "DEK-Info: NONE", + 8, /* blocksize (SSH2 defines minimum blocksize as 8) */ + 0, /* iv_len */ + 0, /* secret_len */ + 0, /* flags */ NULL, crypt_none_crypt, NULL @@ -79,12 +80,12 @@ crypt_init(LIBSSH2_SESSION * session, { struct crypt_ctx *ctx = LIBSSH2_ALLOC(session, sizeof(struct crypt_ctx)); - if (!ctx) + if(!ctx) return LIBSSH2_ERROR_ALLOC; ctx->encrypt = encrypt; ctx->algo = method->algo; - if (_libssh2_cipher_init(&ctx->h, ctx->algo, iv, secret, encrypt)) { + if(_libssh2_cipher_init(&ctx->h, ctx->algo, iv, secret, encrypt)) { LIBSSH2_FREE(session, ctx); return -1; } @@ -108,7 +109,7 @@ static int crypt_dtor(LIBSSH2_SESSION * session, void **abstract) { struct crypt_ctx **cctx = (struct crypt_ctx **) abstract; - if (cctx && *cctx) { + if(cctx && *cctx) { _libssh2_cipher_dtor(&(*cctx)->h); LIBSSH2_FREE(session, *cctx); *abstract = NULL; @@ -119,6 +120,7 @@ crypt_dtor(LIBSSH2_SESSION * session, 
void **abstract) #if LIBSSH2_AES_CTR static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes128_ctr = { "aes128-ctr", + "", 16, /* blocksize */ 16, /* initial value length */ 16, /* secret length -- 16*8 == 128bit */ @@ -131,6 +133,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes128_ctr = { static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes192_ctr = { "aes192-ctr", + "", 16, /* blocksize */ 16, /* initial value length */ 24, /* secret length -- 24*8 == 192bit */ @@ -143,6 +146,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes192_ctr = { static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes256_ctr = { "aes256-ctr", + "", 16, /* blocksize */ 16, /* initial value length */ 32, /* secret length -- 32*8 == 256bit */ @@ -157,6 +161,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes256_ctr = { #if LIBSSH2_AES static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes128_cbc = { "aes128-cbc", + "DEK-Info: AES-128-CBC", 16, /* blocksize */ 16, /* initial value length */ 16, /* secret length -- 16*8 == 128bit */ @@ -169,6 +174,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes128_cbc = { static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes192_cbc = { "aes192-cbc", + "DEK-Info: AES-192-CBC", 16, /* blocksize */ 16, /* initial value length */ 24, /* secret length -- 24*8 == 192bit */ @@ -181,6 +187,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes192_cbc = { static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes256_cbc = { "aes256-cbc", + "DEK-Info: AES-256-CBC", 16, /* blocksize */ 16, /* initial value length */ 32, /* secret length -- 32*8 == 256bit */ @@ -195,6 +202,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_aes256_cbc = { static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_rijndael_cbc_lysator_liu_se = { "rijndael-cbc@lysator.liu.se", + "DEK-Info: AES-256-CBC", 16, /* blocksize */ 16, /* initial value length */ 32, /* secret length -- 32*8 == 256bit */ @@ 
-209,6 +217,7 @@ static const LIBSSH2_CRYPT_METHOD #if LIBSSH2_BLOWFISH static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_blowfish_cbc = { "blowfish-cbc", + "", 8, /* blocksize */ 8, /* initial value length */ 16, /* secret length */ @@ -223,6 +232,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_blowfish_cbc = { #if LIBSSH2_RC4 static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_arcfour = { "arcfour", + "DEK-Info: RC4", 8, /* blocksize */ 8, /* initial value length */ 16, /* secret length */ @@ -242,13 +252,13 @@ crypt_init_arcfour128(LIBSSH2_SESSION * session, { int rc; - rc = crypt_init (session, method, iv, free_iv, secret, free_secret, - encrypt, abstract); - if (rc == 0) { + rc = crypt_init(session, method, iv, free_iv, secret, free_secret, + encrypt, abstract); + if(rc == 0) { struct crypt_ctx *cctx = *(struct crypt_ctx **) abstract; unsigned char block[8]; size_t discard = 1536; - for (; discard; discard -= 8) + for(; discard; discard -= 8) _libssh2_cipher_crypt(&cctx->h, cctx->algo, cctx->encrypt, block, method->blocksize); } @@ -258,6 +268,7 @@ crypt_init_arcfour128(LIBSSH2_SESSION * session, static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_arcfour128 = { "arcfour128", + "", 8, /* blocksize */ 8, /* initial value length */ 16, /* secret length */ @@ -272,6 +283,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_arcfour128 = { #if LIBSSH2_CAST static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_cast128_cbc = { "cast128-cbc", + "", 8, /* blocksize */ 8, /* initial value length */ 16, /* secret length */ @@ -286,6 +298,7 @@ static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_cast128_cbc = { #if LIBSSH2_3DES static const LIBSSH2_CRYPT_METHOD libssh2_crypt_method_3des_cbc = { "3des-cbc", + "DEK-Info: DES-EDE3-CBC", 8, /* blocksize */ 8, /* initial value length */ 24, /* secret length */ diff --git a/vendor/libssh2/src/crypto.h b/vendor/libssh2/src/crypto.h index aa997a3071..809aef7e99 100644 --- 
a/vendor/libssh2/src/crypto.h +++ b/vendor/libssh2/src/crypto.h @@ -1,6 +1,8 @@ +#ifndef __LIBSSH2_CRYPTO_H +#define __LIBSSH2_CRYPTO_H /* Copyright (C) 2009, 2010 Simon Josefsson * Copyright (C) 2006, 2007 The Written Word, Inc. All rights reserved. - * Copyright (C) 2010 Daniel Stenberg + * Copyright (C) 2010-2019 Daniel Stenberg * * Redistribution and use in source and binary forms, * with or without modification, are permitted provided @@ -35,8 +37,6 @@ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ -#ifndef LIBSSH2_CRYPTO_H -#define LIBSSH2_CRYPTO_H #ifdef LIBSSH2_OPENSSL #include "openssl.h" @@ -58,6 +58,11 @@ #include "mbedtls.h" #endif +#define LIBSSH2_ED25519_KEY_LEN 32 +#define LIBSSH2_ED25519_PRIVATE_KEY_LEN 64 +#define LIBSSH2_ED25519_SIG_LEN 64 + +#if LIBSSH2_RSA int _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, const unsigned char *edata, unsigned long elen, @@ -88,10 +93,25 @@ int _libssh2_rsa_sha1_sign(LIBSSH2_SESSION * session, size_t hash_len, unsigned char **signature, size_t *signature_len); +#if LIBSSH2_RSA_SHA2 +int _libssh2_rsa_sha2_sign(LIBSSH2_SESSION * session, + libssh2_rsa_ctx * rsactx, + const unsigned char *hash, + size_t hash_len, + unsigned char **signature, + size_t *signature_len); +int _libssh2_rsa_sha2_verify(libssh2_rsa_ctx * rsa, + size_t hash_len, + const unsigned char *sig, + unsigned long sig_len, + const unsigned char *m, unsigned long m_len); +#endif int _libssh2_rsa_new_private_frommemory(libssh2_rsa_ctx ** rsa, LIBSSH2_SESSION * session, - const char *filedata, size_t filedata_len, + const char *filedata, + size_t filedata_len, unsigned const char *passphrase); +#endif #if LIBSSH2_DSA int _libssh2_dsa_new(libssh2_dsa_ctx ** dsa, @@ -116,10 +136,102 @@ int _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, unsigned long hash_len, unsigned char *sig); int _libssh2_dsa_new_private_frommemory(libssh2_dsa_ctx ** dsa, LIBSSH2_SESSION * session, - const char *filedata, size_t filedata_len, + const char 
*filedata, + size_t filedata_len, unsigned const char *passphrase); #endif +#if LIBSSH2_ECDSA +int +_libssh2_ecdsa_curve_name_with_octal_new(libssh2_ecdsa_ctx ** ecdsactx, + const unsigned char *k, + size_t k_len, + libssh2_curve_type type); +int +_libssh2_ecdsa_new_private(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase); + +int +_libssh2_ecdsa_verify(libssh2_ecdsa_ctx * ctx, + const unsigned char *r, size_t r_len, + const unsigned char *s, size_t s_len, + const unsigned char *m, size_t m_len); + +int +_libssh2_ecdsa_create_key(LIBSSH2_SESSION *session, + _libssh2_ec_key **out_private_key, + unsigned char **out_public_key_octal, + size_t *out_public_key_octal_len, + libssh2_curve_type curve_type); + +int +_libssh2_ecdh_gen_k(_libssh2_bn **k, _libssh2_ec_key *private_key, + const unsigned char *server_public_key, + size_t server_public_key_len); + +int +_libssh2_ecdsa_sign(LIBSSH2_SESSION *session, libssh2_ecdsa_ctx *ec_ctx, + const unsigned char *hash, unsigned long hash_len, + unsigned char **signature, size_t *signature_len); + +int _libssh2_ecdsa_new_private_frommemory(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase); + +libssh2_curve_type +_libssh2_ecdsa_get_curve_type(libssh2_ecdsa_ctx *ec_ctx); + +int +_libssh2_ecdsa_curve_type_from_name(const char *name, + libssh2_curve_type *out_type); + +#endif /* LIBSSH2_ECDSA */ + +#if LIBSSH2_ED25519 + +int +_libssh2_curve25519_new(LIBSSH2_SESSION *session, uint8_t **out_public_key, + uint8_t **out_private_key); + +int +_libssh2_curve25519_gen_k(_libssh2_bn **k, + uint8_t private_key[LIBSSH2_ED25519_KEY_LEN], + uint8_t server_public_key[LIBSSH2_ED25519_KEY_LEN]); + +int +_libssh2_ed25519_verify(libssh2_ed25519_ctx *ctx, const uint8_t *s, + size_t s_len, const uint8_t *m, size_t m_len); + +int +_libssh2_ed25519_new_private(libssh2_ed25519_ctx **ed_ctx, + 
LIBSSH2_SESSION *session, + const char *filename, const uint8_t *passphrase); + +int +_libssh2_ed25519_new_public(libssh2_ed25519_ctx **ed_ctx, + LIBSSH2_SESSION *session, + const unsigned char *raw_pub_key, + const uint8_t key_len); + +int +_libssh2_ed25519_sign(libssh2_ed25519_ctx *ctx, LIBSSH2_SESSION *session, + uint8_t **out_sig, size_t *out_sig_len, + const uint8_t *message, size_t message_len); + +int +_libssh2_ed25519_new_private_frommemory(libssh2_ed25519_ctx **ed_ctx, + LIBSSH2_SESSION *session, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase); + +#endif /* LIBSSH2_ED25519 */ + + int _libssh2_cipher_init(_libssh2_cipher_ctx * h, _libssh2_cipher_type(algo), unsigned char *iv, @@ -136,6 +248,7 @@ int _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, size_t *pubkeydata_len, const char *privatekey, const char *passphrase); + int _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, unsigned char **method, size_t *method_len, @@ -145,6 +258,23 @@ int _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, size_t privatekeydata_len, const char *passphrase); -void _libssh2_init_aes_ctr(void); -#endif +/** + * @function _libssh2_supported_key_sign_algorithms + * @abstract Returns supported algorithms used for upgrading public + * key signing RFC 8332 + * @discussion Based on the incoming key_method value, this function + * will return supported algorithms that can upgrade the key method + * @related _libssh2_key_sign_algorithm() + * @param key_method current key method, usually the default key sig method + * @param key_method_len length of the key method buffer + * @result comma seperated list of supported upgrade options per RFC 8332, if + * there is no upgrade option return NULL + */ + +const char * +_libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session, + unsigned char *key_method, + size_t key_method_len); + +#endif /* __LIBSSH2_CRYPTO_H */ diff --git a/vendor/libssh2/src/global.c 
b/vendor/libssh2/src/global.c index dc45e70036..68289845f0 100644 --- a/vendor/libssh2/src/global.c +++ b/vendor/libssh2/src/global.c @@ -44,9 +44,8 @@ static int _libssh2_init_flags = 0; LIBSSH2_API int libssh2_init(int flags) { - if (_libssh2_initialized == 0 && !(flags & LIBSSH2_INIT_NO_CRYPTO)) { + if(_libssh2_initialized == 0 && !(flags & LIBSSH2_INIT_NO_CRYPTO)) { libssh2_crypto_init(); - _libssh2_init_aes_ctr(); } _libssh2_initialized++; @@ -58,12 +57,13 @@ libssh2_init(int flags) LIBSSH2_API void libssh2_exit(void) { - if (_libssh2_initialized == 0) + if(_libssh2_initialized == 0) return; _libssh2_initialized--; - if (!(_libssh2_init_flags & LIBSSH2_INIT_NO_CRYPTO)) { + if(_libssh2_initialized == 0 && + !(_libssh2_init_flags & LIBSSH2_INIT_NO_CRYPTO)) { libssh2_crypto_exit(); } @@ -73,6 +73,6 @@ libssh2_exit(void) void _libssh2_init_if_needed(void) { - if (_libssh2_initialized == 0) + if(_libssh2_initialized == 0) (void)libssh2_init (0); } diff --git a/vendor/libssh2/src/hostkey.c b/vendor/libssh2/src/hostkey.c index 2a0a8f943b..eeb9e579e3 100644 --- a/vendor/libssh2/src/hostkey.c +++ b/vendor/libssh2/src/hostkey.c @@ -1,5 +1,5 @@ /* Copyright (c) 2004-2006, Sara Golemon - * Copyright (c) 2009-2014 by Daniel Stenberg + * Copyright (c) 2009-2019 by Daniel Stenberg * All rights reserved. 
* * Redistribution and use in source and binary forms, @@ -64,38 +64,55 @@ hostkey_method_ssh_rsa_init(LIBSSH2_SESSION * session, void **abstract) { libssh2_rsa_ctx *rsactx; - const unsigned char *s, *e, *n; - unsigned long len, e_len, n_len; - int ret; - - (void) hostkey_data_len; + unsigned char *e, *n, *type; + size_t e_len, n_len, type_len; + struct string_buf buf; - if (*abstract) { + if(*abstract) { hostkey_method_ssh_rsa_dtor(session, abstract); *abstract = NULL; } - s = hostkey_data; - len = _libssh2_ntohu32(s); - s += 4; + if(hostkey_data_len < 19) { + _libssh2_debug(session, LIBSSH2_TRACE_ERROR, + "host key length too short"); + return -1; + } - if (len != 7 || strncmp((char *) s, "ssh-rsa", 7) != 0) { + buf.data = (unsigned char *)hostkey_data; + buf.dataptr = buf.data; + buf.len = hostkey_data_len; + + if(_libssh2_get_string(&buf, &type, &type_len)) { return -1; } - s += 7; - e_len = _libssh2_ntohu32(s); - s += 4; + /* we accept one of 3 header types */ + if(type_len == 7 && strncmp("ssh-rsa", (char *)type, 7) == 0) { + /* ssh-rsa */ + } +#if LIBSSH2_RSA_SHA2 + else if(type_len == 12 && strncmp("rsa-sha2-256", (char *)type, 12) == 0) { + /* rsa-sha2-256 */ + } + else if(type_len == 12 && strncmp("rsa-sha2-512", (char *)type, 12) == 0) { + /* rsa-sha2-512 */ + } +#endif + else { + _libssh2_debug(session, LIBSSH2_TRACE_ERROR, + "unexpected rsa type: %.*s", type_len, type); + return -1; + } - e = s; - s += e_len; - n_len = _libssh2_ntohu32(s); - s += 4; - n = s; + if(_libssh2_get_string(&buf, &e, &e_len)) + return -1; - ret = _libssh2_rsa_new(&rsactx, e, e_len, n, n_len, NULL, 0, - NULL, 0, NULL, 0, NULL, 0, NULL, 0, NULL, 0); - if (ret) { + if(_libssh2_get_string(&buf, &n, &n_len)) + return -1; + + if(_libssh2_rsa_new(&rsactx, e, e_len, n, n_len, NULL, 0, + NULL, 0, NULL, 0, NULL, 0, NULL, 0, NULL, 0)) { return -1; } @@ -118,13 +135,13 @@ hostkey_method_ssh_rsa_initPEM(LIBSSH2_SESSION * session, libssh2_rsa_ctx *rsactx; int ret; - if (*abstract) { + 
if(*abstract) { hostkey_method_ssh_rsa_dtor(session, abstract); *abstract = NULL; } ret = _libssh2_rsa_new_private(&rsactx, session, privkeyfile, passphrase); - if (ret) { + if(ret) { return -1; } @@ -148,7 +165,7 @@ hostkey_method_ssh_rsa_initPEMFromMemory(LIBSSH2_SESSION * session, libssh2_rsa_ctx *rsactx; int ret; - if (*abstract) { + if(*abstract) { hostkey_method_ssh_rsa_dtor(session, abstract); *abstract = NULL; } @@ -156,7 +173,7 @@ hostkey_method_ssh_rsa_initPEMFromMemory(LIBSSH2_SESSION * session, ret = _libssh2_rsa_new_private_frommemory(&rsactx, session, privkeyfiledata, privkeyfiledata_len, passphrase); - if (ret) { + if(ret) { return -1; } @@ -181,6 +198,9 @@ hostkey_method_ssh_rsa_sig_verify(LIBSSH2_SESSION * session, (void) session; /* Skip past keyname_len(4) + keyname(7){"ssh-rsa"} + signature_len(4) */ + if(sig_len < 15) + return -1; + sig += 15; sig_len -= 15; return _libssh2_rsa_sha1_verify(rsactx, sig, sig_len, m, m_len); @@ -218,7 +238,76 @@ hostkey_method_ssh_rsa_signv(LIBSSH2_SESSION * session, ret = _libssh2_rsa_sha1_sign(session, rsactx, hash, SHA_DIGEST_LENGTH, signature, signature_len); - if (ret) { + if(ret) { + return -1; + } + + return 0; +#endif +} + +/* + * hostkey_method_ssh_rsa_sha2_256_sig_verify + * + * Verify signature created by remote + */ +#if LIBSSH2_RSA_SHA2 + +static int +hostkey_method_ssh_rsa_sha2_256_sig_verify(LIBSSH2_SESSION * session, + const unsigned char *sig, + size_t sig_len, + const unsigned char *m, + size_t m_len, void **abstract) +{ + libssh2_rsa_ctx *rsactx = (libssh2_rsa_ctx *) (*abstract); + (void) session; + + /* Skip past keyname_len(4) + keyname(12){"rsa-sha2-256"} + + signature_len(4) */ + if(sig_len < 20) + return -1; + + sig += 20; + sig_len -= 20; + return _libssh2_rsa_sha2_verify(rsactx, SHA256_DIGEST_LENGTH, sig, sig_len, + m, m_len); +} + +/* + * hostkey_method_ssh_rsa_sha2_256_signv + * + * Construct a signature from an array of vectors + */ + +static int 
+hostkey_method_ssh_rsa_sha2_256_signv(LIBSSH2_SESSION * session, + unsigned char **signature, + size_t *signature_len, + int veccount, + const struct iovec datavec[], + void **abstract) +{ + libssh2_rsa_ctx *rsactx = (libssh2_rsa_ctx *) (*abstract); + +#ifdef _libssh2_rsa_sha2_256_signv + return _libssh2_rsa_sha2_256_signv(session, signature, signature_len, + veccount, datavec, rsactx); +#else + int ret; + int i; + unsigned char hash[SHA256_DIGEST_LENGTH]; + libssh2_sha256_ctx ctx; + + libssh2_sha256_init(&ctx); + for(i = 0; i < veccount; i++) { + libssh2_sha256_update(ctx, datavec[i].iov_base, datavec[i].iov_len); + } + libssh2_sha256_final(ctx, hash); + + ret = _libssh2_rsa_sha2_sign(session, rsactx, hash, SHA256_DIGEST_LENGTH, + signature, signature_len); + if(ret) { return -1; } @@ -226,6 +315,77 @@ hostkey_method_ssh_rsa_signv(LIBSSH2_SESSION * session, #endif } +/* + * hostkey_method_ssh_rsa_sha2_512_sig_verify + * + * Verify signature created by remote + */ + +static int +hostkey_method_ssh_rsa_sha2_512_sig_verify(LIBSSH2_SESSION * session, + const unsigned char *sig, + size_t sig_len, + const unsigned char *m, + size_t m_len, void **abstract) +{ + libssh2_rsa_ctx *rsactx = (libssh2_rsa_ctx *) (*abstract); + (void) session; + + /* Skip past keyname_len(4) + keyname(12){"rsa-sha2-512"} + + signature_len(4) */ + if(sig_len < 20) + return -1; + + sig += 20; + sig_len -= 20; + return _libssh2_rsa_sha2_verify(rsactx, SHA512_DIGEST_LENGTH, sig, + sig_len, m, m_len); +} + + +/* + * hostkey_method_ssh_rsa_sha2_512_signv + * + * Construct a signature from an array of vectors + */ +static int +hostkey_method_ssh_rsa_sha2_512_signv(LIBSSH2_SESSION * session, + unsigned char **signature, + size_t *signature_len, + int veccount, + const struct iovec datavec[], + void **abstract) +{ + libssh2_rsa_ctx *rsactx = (libssh2_rsa_ctx *) (*abstract); + +#ifdef _libssh2_rsa_sha2_512_signv + return _libssh2_rsa_sha2_512_signv(session, signature, signature_len, + veccount, datavec, 
rsactx); +#else + int ret; + int i; + unsigned char hash[SHA512_DIGEST_LENGTH]; + libssh2_sha512_ctx ctx; + + libssh2_sha512_init(&ctx); + for(i = 0; i < veccount; i++) { + libssh2_sha512_update(ctx, datavec[i].iov_base, datavec[i].iov_len); + } + libssh2_sha512_final(ctx, hash); + + ret = _libssh2_rsa_sha2_sign(session, rsactx, hash, SHA512_DIGEST_LENGTH, + signature, signature_len); + if(ret) { + return -1; + } + + return 0; +#endif +} + +#endif /* LIBSSH2_RSA_SHA2 */ + + /* * hostkey_method_ssh_rsa_dtor * @@ -259,6 +419,35 @@ static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ssh_rsa = { NULL, /* encrypt */ hostkey_method_ssh_rsa_dtor, }; + +#if LIBSSH2_RSA_SHA2 + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ssh_rsa_sha2_256 = { + "rsa-sha2-256", + SHA256_DIGEST_LENGTH, + hostkey_method_ssh_rsa_init, + hostkey_method_ssh_rsa_initPEM, + hostkey_method_ssh_rsa_initPEMFromMemory, + hostkey_method_ssh_rsa_sha2_256_sig_verify, + hostkey_method_ssh_rsa_sha2_256_signv, + NULL, /* encrypt */ + hostkey_method_ssh_rsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ssh_rsa_sha2_512 = { + "rsa-sha2-512", + SHA512_DIGEST_LENGTH, + hostkey_method_ssh_rsa_init, + hostkey_method_ssh_rsa_initPEM, + hostkey_method_ssh_rsa_initPEMFromMemory, + hostkey_method_ssh_rsa_sha2_512_sig_verify, + hostkey_method_ssh_rsa_sha2_512_signv, + NULL, /* encrypt */ + hostkey_method_ssh_rsa_dtor, +}; + +#endif /* LIBSSH2_RSA_SHA2 */ + #endif /* LIBSSH2_RSA */ #if LIBSSH2_DSA @@ -281,45 +470,42 @@ hostkey_method_ssh_dss_init(LIBSSH2_SESSION * session, void **abstract) { libssh2_dsa_ctx *dsactx; - const unsigned char *p, *q, *g, *y, *s; - unsigned long p_len, q_len, g_len, y_len, len; - int ret; - - (void) hostkey_data_len; + unsigned char *p, *q, *g, *y; + size_t p_len, q_len, g_len, y_len; + struct string_buf buf; - if (*abstract) { + if(*abstract) { hostkey_method_ssh_dss_dtor(session, abstract); *abstract = NULL; } - s = hostkey_data; - len = _libssh2_ntohu32(s); - s += 4; 
- if (len != 7 || strncmp((char *) s, "ssh-dss", 7) != 0) { + if(hostkey_data_len < 27) { + _libssh2_debug(session, LIBSSH2_TRACE_ERROR, + "host key length too short"); return -1; } - s += 7; - p_len = _libssh2_ntohu32(s); - s += 4; - p = s; - s += p_len; - q_len = _libssh2_ntohu32(s); - s += 4; - q = s; - s += q_len; - g_len = _libssh2_ntohu32(s); - s += 4; - g = s; - s += g_len; - y_len = _libssh2_ntohu32(s); - s += 4; - y = s; - /* s += y_len; */ + buf.data = (unsigned char *)hostkey_data; + buf.dataptr = buf.data; + buf.len = hostkey_data_len; + + if(_libssh2_match_string(&buf, "ssh-dss")) + return -1; + + if(_libssh2_get_string(&buf, &p, &p_len)) + return -1; - ret = _libssh2_dsa_new(&dsactx, p, p_len, q, q_len, - g, g_len, y, y_len, NULL, 0); - if (ret) { + if(_libssh2_get_string(&buf, &q, &q_len)) + return -1; + + if(_libssh2_get_string(&buf, &g, &g_len)) + return -1; + + if(_libssh2_get_string(&buf, &y, &y_len)) + return -1; + + if(_libssh2_dsa_new(&dsactx, p, p_len, q, q_len, + g, g_len, y, y_len, NULL, 0)) { return -1; } @@ -342,13 +528,13 @@ hostkey_method_ssh_dss_initPEM(LIBSSH2_SESSION * session, libssh2_dsa_ctx *dsactx; int ret; - if (*abstract) { + if(*abstract) { hostkey_method_ssh_dss_dtor(session, abstract); *abstract = NULL; } ret = _libssh2_dsa_new_private(&dsactx, session, privkeyfile, passphrase); - if (ret) { + if(ret) { return -1; } @@ -372,7 +558,7 @@ hostkey_method_ssh_dss_initPEMFromMemory(LIBSSH2_SESSION * session, libssh2_dsa_ctx *dsactx; int ret; - if (*abstract) { + if(*abstract) { hostkey_method_ssh_dss_dtor(session, abstract); *abstract = NULL; } @@ -380,7 +566,7 @@ hostkey_method_ssh_dss_initPEMFromMemory(LIBSSH2_SESSION * session, ret = _libssh2_dsa_new_private_frommemory(&dsactx, session, privkeyfiledata, privkeyfiledata_len, passphrase); - if (ret) { + if(ret) { return -1; } @@ -404,12 +590,14 @@ hostkey_method_ssh_dss_sig_verify(LIBSSH2_SESSION * session, libssh2_dsa_ctx *dsactx = (libssh2_dsa_ctx *) (*abstract); /* Skip past 
keyname_len(4) + keyname(7){"ssh-dss"} + signature_len(4) */ - sig += 15; - sig_len -= 15; - if (sig_len != 40) { + if(sig_len != 55) { return _libssh2_error(session, LIBSSH2_ERROR_PROTO, "Invalid DSS signature length"); } + + sig += 15; + sig_len -= 15; + return _libssh2_dsa_sha1_verify(dsactx, sig, m, m_len); } @@ -432,7 +620,7 @@ hostkey_method_ssh_dss_signv(LIBSSH2_SESSION * session, int i; *signature = LIBSSH2_CALLOC(session, 2 * SHA_DIGEST_LENGTH); - if (!*signature) { + if(!*signature) { return -1; } @@ -444,7 +632,7 @@ hostkey_method_ssh_dss_signv(LIBSSH2_SESSION * session, } libssh2_sha1_final(ctx, hash); - if (_libssh2_dsa_sha1_sign(dsactx, hash, SHA_DIGEST_LENGTH, *signature)) { + if(_libssh2_dsa_sha1_sign(dsactx, hash, SHA_DIGEST_LENGTH, *signature)) { LIBSSH2_FREE(session, *signature); return -1; } @@ -483,8 +671,570 @@ static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ssh_dss = { }; #endif /* LIBSSH2_DSA */ +#if LIBSSH2_ECDSA + +/* *********** + * ecdsa-sha2-nistp256/384/521 * + *********** */ + +static int +hostkey_method_ssh_ecdsa_dtor(LIBSSH2_SESSION * session, + void **abstract); + +/* + * hostkey_method_ssh_ecdsa_init + * + * Initialize the server hostkey working area with e/n pair + */ +static int +hostkey_method_ssh_ecdsa_init(LIBSSH2_SESSION * session, + const unsigned char *hostkey_data, + size_t hostkey_data_len, + void **abstract) +{ + libssh2_ecdsa_ctx *ecdsactx = NULL; + unsigned char *type_str, *domain, *public_key; + size_t key_len, len; + libssh2_curve_type type; + struct string_buf buf; + + if(abstract != NULL && *abstract) { + hostkey_method_ssh_ecdsa_dtor(session, abstract); + *abstract = NULL; + } + + if(hostkey_data_len < 39) { + _libssh2_debug(session, LIBSSH2_TRACE_ERROR, + "host key length too short"); + return -1; + } + + buf.data = (unsigned char *)hostkey_data; + buf.dataptr = buf.data; + buf.len = hostkey_data_len; + + if(_libssh2_get_string(&buf, &type_str, &len) || len != 19) + return -1; + + if(strncmp((char *) 
type_str, "ecdsa-sha2-nistp256", 19) == 0) { + type = LIBSSH2_EC_CURVE_NISTP256; + } + else if(strncmp((char *) type_str, "ecdsa-sha2-nistp384", 19) == 0) { + type = LIBSSH2_EC_CURVE_NISTP384; + } + else if(strncmp((char *) type_str, "ecdsa-sha2-nistp521", 19) == 0) { + type = LIBSSH2_EC_CURVE_NISTP521; + } + else { + return -1; + } + + if(_libssh2_get_string(&buf, &domain, &len) || len != 8) + return -1; + + if(type == LIBSSH2_EC_CURVE_NISTP256 && + strncmp((char *)domain, "nistp256", 8) != 0) { + return -1; + } + else if(type == LIBSSH2_EC_CURVE_NISTP384 && + strncmp((char *)domain, "nistp384", 8) != 0) { + return -1; + } + else if(type == LIBSSH2_EC_CURVE_NISTP521 && + strncmp((char *)domain, "nistp521", 8) != 0) { + return -1; + } + + /* public key */ + if(_libssh2_get_string(&buf, &public_key, &key_len)) + return -1; + + if(_libssh2_ecdsa_curve_name_with_octal_new(&ecdsactx, public_key, + key_len, type)) + return -1; + + if(abstract != NULL) + *abstract = ecdsactx; + + return 0; +} + +/* + * hostkey_method_ssh_ecdsa_initPEM + * + * Load a Private Key from a PEM file + */ +static int +hostkey_method_ssh_ecdsa_initPEM(LIBSSH2_SESSION * session, + const char *privkeyfile, + unsigned const char *passphrase, + void **abstract) +{ + libssh2_ecdsa_ctx *ec_ctx = NULL; + int ret; + + if(abstract != NULL && *abstract) { + hostkey_method_ssh_ecdsa_dtor(session, abstract); + *abstract = NULL; + } + + ret = _libssh2_ecdsa_new_private(&ec_ctx, session, + privkeyfile, passphrase); + + if(abstract != NULL) + *abstract = ec_ctx; + + return ret; +} + +/* + * hostkey_method_ssh_ecdsa_initPEMFromMemory + * + * Load a Private Key from memory + */ +static int +hostkey_method_ssh_ecdsa_initPEMFromMemory(LIBSSH2_SESSION * session, + const char *privkeyfiledata, + size_t privkeyfiledata_len, + unsigned const char *passphrase, + void **abstract) +{ + libssh2_ecdsa_ctx *ec_ctx = NULL; + int ret; + + if(abstract != NULL && *abstract) { + hostkey_method_ssh_ecdsa_dtor(session, abstract); 
+ *abstract = NULL; + } + + ret = _libssh2_ecdsa_new_private_frommemory(&ec_ctx, session, + privkeyfiledata, + privkeyfiledata_len, + passphrase); + if(ret) { + return -1; + } + + if(abstract != NULL) + *abstract = ec_ctx; + + return 0; +} + +/* + * hostkey_method_ecdsa_sig_verify + * + * Verify signature created by remote + */ +static int +hostkey_method_ssh_ecdsa_sig_verify(LIBSSH2_SESSION * session, + const unsigned char *sig, + size_t sig_len, + const unsigned char *m, + size_t m_len, void **abstract) +{ + unsigned char *r, *s, *name; + size_t r_len, s_len, name_len; + uint32_t len; + struct string_buf buf; + libssh2_ecdsa_ctx *ctx = (libssh2_ecdsa_ctx *) (*abstract); + + (void) session; + + if(sig_len < 35) + return -1; + + /* keyname_len(4) + keyname(19){"ecdsa-sha2-nistp256"} + + signature_len(4) */ + buf.data = (unsigned char *)sig; + buf.dataptr = buf.data; + buf.len = sig_len; + + if(_libssh2_get_string(&buf, &name, &name_len) || name_len != 19) + return -1; + + if(_libssh2_get_u32(&buf, &len) != 0 || len < 8) + return -1; + + if(_libssh2_get_string(&buf, &r, &r_len)) + return -1; + + if(_libssh2_get_string(&buf, &s, &s_len)) + return -1; + + return _libssh2_ecdsa_verify(ctx, r, r_len, s, s_len, m, m_len); +} + + +#define LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(digest_type) \ + { \ + unsigned char hash[SHA##digest_type##_DIGEST_LENGTH]; \ + libssh2_sha##digest_type##_ctx ctx; \ + int i; \ + libssh2_sha##digest_type##_init(&ctx); \ + for(i = 0; i < veccount; i++) { \ + libssh2_sha##digest_type##_update(ctx, datavec[i].iov_base, \ + datavec[i].iov_len); \ + } \ + libssh2_sha##digest_type##_final(ctx, hash); \ + ret = _libssh2_ecdsa_sign(session, ec_ctx, hash, \ + SHA##digest_type##_DIGEST_LENGTH, \ + signature, signature_len); \ + } + + +/* + * hostkey_method_ecdsa_signv + * + * Construct a signature from an array of vectors + */ +static int +hostkey_method_ssh_ecdsa_signv(LIBSSH2_SESSION * session, + unsigned char **signature, + size_t *signature_len, + int 
veccount, + const struct iovec datavec[], + void **abstract) +{ + libssh2_ecdsa_ctx *ec_ctx = (libssh2_ecdsa_ctx *) (*abstract); + libssh2_curve_type type = _libssh2_ecdsa_get_curve_type(ec_ctx); + int ret = 0; + + if(type == LIBSSH2_EC_CURVE_NISTP256) { + LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(256); + } + else if(type == LIBSSH2_EC_CURVE_NISTP384) { + LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(384); + } + else if(type == LIBSSH2_EC_CURVE_NISTP521) { + LIBSSH2_HOSTKEY_METHOD_EC_SIGNV_HASH(512); + } + else { + return -1; + } + + return ret; +} + +/* + * hostkey_method_ssh_ecdsa_dtor + * + * Shutdown the hostkey by freeing EC_KEY context + */ +static int +hostkey_method_ssh_ecdsa_dtor(LIBSSH2_SESSION * session, void **abstract) +{ + libssh2_ecdsa_ctx *keyctx = (libssh2_ecdsa_ctx *) (*abstract); + (void) session; + + if(keyctx != NULL) + _libssh2_ecdsa_free(keyctx); + + *abstract = NULL; + + return 0; +} + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp256 = { + "ecdsa-sha2-nistp256", + SHA256_DIGEST_LENGTH, + hostkey_method_ssh_ecdsa_init, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + hostkey_method_ssh_ecdsa_sig_verify, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ecdsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp384 = { + "ecdsa-sha2-nistp384", + SHA384_DIGEST_LENGTH, + hostkey_method_ssh_ecdsa_init, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + hostkey_method_ssh_ecdsa_sig_verify, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ecdsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp521 = { + "ecdsa-sha2-nistp521", + SHA512_DIGEST_LENGTH, + hostkey_method_ssh_ecdsa_init, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + hostkey_method_ssh_ecdsa_sig_verify, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + 
hostkey_method_ssh_ecdsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp256_cert = { + "ecdsa-sha2-nistp256-cert-v01@openssh.com", + SHA256_DIGEST_LENGTH, + NULL, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + NULL, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ecdsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp384_cert = { + "ecdsa-sha2-nistp384-cert-v01@openssh.com", + SHA384_DIGEST_LENGTH, + NULL, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + NULL, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ecdsa_dtor, +}; + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ecdsa_ssh_nistp521_cert = { + "ecdsa-sha2-nistp521-cert-v01@openssh.com", + SHA512_DIGEST_LENGTH, + NULL, + hostkey_method_ssh_ecdsa_initPEM, + hostkey_method_ssh_ecdsa_initPEMFromMemory, + NULL, + hostkey_method_ssh_ecdsa_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ecdsa_dtor, +}; + +#endif /* LIBSSH2_ECDSA */ + +#if LIBSSH2_ED25519 + +/* *********** + * ed25519 * + *********** */ + +static int hostkey_method_ssh_ed25519_dtor(LIBSSH2_SESSION * session, + void **abstract); + +/* + * hostkey_method_ssh_ed25519_init + * + * Initialize the server hostkey working area with e/n pair + */ +static int +hostkey_method_ssh_ed25519_init(LIBSSH2_SESSION * session, + const unsigned char *hostkey_data, + size_t hostkey_data_len, + void **abstract) +{ + const unsigned char *s; + unsigned long len, key_len; + libssh2_ed25519_ctx *ctx = NULL; + + if(*abstract) { + hostkey_method_ssh_ed25519_dtor(session, abstract); + *abstract = NULL; + } + + if(hostkey_data_len < 19) { + _libssh2_debug(session, LIBSSH2_TRACE_ERROR, + "host key length too short"); + return -1; + } + + s = hostkey_data; + len = _libssh2_ntohu32(s); + s += 4; + + if(len != 11 || strncmp((char *) s, "ssh-ed25519", 11) != 0) { + return -1; + } + + s += 
11; + + /* public key */ + key_len = _libssh2_ntohu32(s); + s += 4; + + if(_libssh2_ed25519_new_public(&ctx, session, s, key_len) != 0) { + return -1; + } + + *abstract = ctx; + + return 0; +} + +/* + * hostkey_method_ssh_ed25519_initPEM + * + * Load a Private Key from a PEM file + */ +static int +hostkey_method_ssh_ed25519_initPEM(LIBSSH2_SESSION * session, + const char *privkeyfile, + unsigned const char *passphrase, + void **abstract) +{ + libssh2_ed25519_ctx *ec_ctx = NULL; + int ret; + + if(*abstract) { + hostkey_method_ssh_ed25519_dtor(session, abstract); + *abstract = NULL; + } + + ret = _libssh2_ed25519_new_private(&ec_ctx, session, + privkeyfile, passphrase); + if(ret) { + return -1; + } + + *abstract = ec_ctx; + + return ret; +} + +/* + * hostkey_method_ssh_ed25519_initPEMFromMemory + * + * Load a Private Key from memory + */ +static int +hostkey_method_ssh_ed25519_initPEMFromMemory(LIBSSH2_SESSION * session, + const char *privkeyfiledata, + size_t privkeyfiledata_len, + unsigned const char *passphrase, + void **abstract) +{ + libssh2_ed25519_ctx *ed_ctx = NULL; + int ret; + + if(abstract != NULL && *abstract) { + hostkey_method_ssh_ed25519_dtor(session, abstract); + *abstract = NULL; + } + + ret = _libssh2_ed25519_new_private_frommemory(&ed_ctx, session, + privkeyfiledata, + privkeyfiledata_len, + passphrase); + if(ret) { + return -1; + } + + if(abstract != NULL) + *abstract = ed_ctx; + + return 0; +} + +/* + * hostkey_method_ssh_ed25519_sig_verify + * + * Verify signature created by remote + */ +static int +hostkey_method_ssh_ed25519_sig_verify(LIBSSH2_SESSION * session, + const unsigned char *sig, + size_t sig_len, + const unsigned char *m, + size_t m_len, void **abstract) +{ + libssh2_ed25519_ctx *ctx = (libssh2_ed25519_ctx *) (*abstract); + (void) session; + + if(sig_len < 19) + return -1; + + /* Skip past keyname_len(4) + keyname(11){"ssh-ed25519"} + + signature_len(4) */ + sig += 19; + sig_len -= 19; + + if(sig_len != LIBSSH2_ED25519_SIG_LEN) + 
return -1; + + return _libssh2_ed25519_verify(ctx, sig, sig_len, m, m_len); +} + +/* + * hostkey_method_ssh_ed25519_signv + * + * Construct a signature from an array of vectors + */ +static int +hostkey_method_ssh_ed25519_signv(LIBSSH2_SESSION * session, + unsigned char **signature, + size_t *signature_len, + int veccount, + const struct iovec datavec[], + void **abstract) +{ + libssh2_ed25519_ctx *ctx = (libssh2_ed25519_ctx *) (*abstract); + + if(veccount != 1) { + return -1; + } + + return _libssh2_ed25519_sign(ctx, session, signature, signature_len, + datavec[0].iov_base, datavec[0].iov_len); +} + + +/* + * hostkey_method_ssh_ed25519_dtor + * + * Shutdown the hostkey by freeing key context + */ +static int +hostkey_method_ssh_ed25519_dtor(LIBSSH2_SESSION * session, void **abstract) +{ + libssh2_ed25519_ctx *keyctx = (libssh2_ed25519_ctx*) (*abstract); + (void) session; + + if(keyctx) + _libssh2_ed25519_free(keyctx); + + *abstract = NULL; + + return 0; +} + +static const LIBSSH2_HOSTKEY_METHOD hostkey_method_ssh_ed25519 = { + "ssh-ed25519", + SHA256_DIGEST_LENGTH, + hostkey_method_ssh_ed25519_init, + hostkey_method_ssh_ed25519_initPEM, + hostkey_method_ssh_ed25519_initPEMFromMemory, + hostkey_method_ssh_ed25519_sig_verify, + hostkey_method_ssh_ed25519_signv, + NULL, /* encrypt */ + hostkey_method_ssh_ed25519_dtor, +}; + +#endif /*LIBSSH2_ED25519*/ + + static const LIBSSH2_HOSTKEY_METHOD *hostkey_methods[] = { +#if LIBSSH2_ECDSA + &hostkey_method_ecdsa_ssh_nistp256, + &hostkey_method_ecdsa_ssh_nistp384, + &hostkey_method_ecdsa_ssh_nistp521, + &hostkey_method_ecdsa_ssh_nistp256_cert, + &hostkey_method_ecdsa_ssh_nistp384_cert, + &hostkey_method_ecdsa_ssh_nistp521_cert, +#endif +#if LIBSSH2_ED25519 + &hostkey_method_ssh_ed25519, +#endif #if LIBSSH2_RSA +#if LIBSSH2_RSA_SHA2 + &hostkey_method_ssh_rsa_sha2_512, + &hostkey_method_ssh_rsa_sha2_256, +#endif /* LIBSSH2_RSA_SHA2 */ &hostkey_method_ssh_rsa, #endif /* LIBSSH2_RSA */ #if LIBSSH2_DSA @@ -505,12 +1255,12 @@ 
libssh2_hostkey_methods(void) * Returns hash signature * Returned buffer should NOT be freed * Length of buffer is determined by hash type - * i.e. MD5 == 16, SHA1 == 20 + * i.e. MD5 == 16, SHA1 == 20, SHA256 == 32 */ LIBSSH2_API const char * libssh2_hostkey_hash(LIBSSH2_SESSION * session, int hash_type) { - switch (hash_type) { + switch(hash_type) { #if LIBSSH2_MD5 case LIBSSH2_HOSTKEY_HASH_MD5: return (session->server_hostkey_md5_valid) @@ -523,6 +1273,11 @@ libssh2_hostkey_hash(LIBSSH2_SESSION * session, int hash_type) ? (char *) session->server_hostkey_sha1 : NULL; break; + case LIBSSH2_HOSTKEY_HASH_SHA256: + return (session->server_hostkey_sha256_valid) + ? (char *) session->server_hostkey_sha256 + : NULL; + break; default: return NULL; } @@ -530,22 +1285,55 @@ libssh2_hostkey_hash(LIBSSH2_SESSION * session, int hash_type) static int hostkey_type(const unsigned char *hostkey, size_t len) { - const unsigned char rsa[] = { + static const unsigned char rsa[] = { 0, 0, 0, 0x07, 's', 's', 'h', '-', 'r', 's', 'a' }; - const unsigned char dss[] = { + static const unsigned char dss[] = { 0, 0, 0, 0x07, 's', 's', 'h', '-', 'd', 's', 's' }; + static const unsigned char ecdsa_256[] = { + 0, 0, 0, 0x13, 'e', 'c', 'd', 's', 'a', '-', 's', 'h', 'a', '2', '-', + 'n', 'i', 's', 't', 'p', '2', '5', '6' + }; + static const unsigned char ecdsa_384[] = { + 0, 0, 0, 0x13, 'e', 'c', 'd', 's', 'a', '-', 's', 'h', 'a', '2', '-', + 'n', 'i', 's', 't', 'p', '3', '8', '4' + }; + static const unsigned char ecdsa_521[] = { + 0, 0, 0, 0x13, 'e', 'c', 'd', 's', 'a', '-', 's', 'h', 'a', '2', '-', + 'n', 'i', 's', 't', 'p', '5', '2', '1' + }; + static const unsigned char ed25519[] = { + 0, 0, 0, 0x0b, 's', 's', 'h', '-', 'e', 'd', '2', '5', '5', '1', '9' + }; - if (len < 11) + if(len < 11) return LIBSSH2_HOSTKEY_TYPE_UNKNOWN; - if (!memcmp(rsa, hostkey, 11)) + if(!memcmp(rsa, hostkey, 11)) return LIBSSH2_HOSTKEY_TYPE_RSA; - if (!memcmp(dss, hostkey, 11)) + if(!memcmp(dss, hostkey, 11)) return 
LIBSSH2_HOSTKEY_TYPE_DSS; + if(len < 15) + return LIBSSH2_HOSTKEY_TYPE_UNKNOWN; + + if(!memcmp(ed25519, hostkey, 15)) + return LIBSSH2_HOSTKEY_TYPE_ED25519; + + if(len < 23) + return LIBSSH2_HOSTKEY_TYPE_UNKNOWN; + + if(!memcmp(ecdsa_256, hostkey, 23)) + return LIBSSH2_HOSTKEY_TYPE_ECDSA_256; + + if(!memcmp(ecdsa_384, hostkey, 23)) + return LIBSSH2_HOSTKEY_TYPE_ECDSA_384; + + if(!memcmp(ecdsa_521, hostkey, 23)) + return LIBSSH2_HOSTKEY_TYPE_ECDSA_521; + return LIBSSH2_HOSTKEY_TYPE_UNKNOWN; } @@ -561,7 +1349,7 @@ libssh2_session_hostkey(LIBSSH2_SESSION *session, size_t *len, int *type) if(session->server_hostkey_len) { if(len) *len = session->server_hostkey_len; - if (type) + if(type) *type = hostkey_type(session->server_hostkey, session->server_hostkey_len); return (char *) session->server_hostkey; @@ -570,4 +1358,3 @@ libssh2_session_hostkey(LIBSSH2_SESSION *session, size_t *len, int *type) *len = 0; return NULL; } - diff --git a/vendor/libssh2/src/keepalive.c b/vendor/libssh2/src/keepalive.c index fd749dd296..2151b17100 100644 --- a/vendor/libssh2/src/keepalive.c +++ b/vendor/libssh2/src/keepalive.c @@ -46,7 +46,7 @@ libssh2_keepalive_config (LIBSSH2_SESSION *session, int want_reply, unsigned interval) { - if (interval == 1) + if(interval == 1) session->keepalive_interval = 2; else session->keepalive_interval = interval; @@ -59,20 +59,20 @@ libssh2_keepalive_send (LIBSSH2_SESSION *session, { time_t now; - if (!session->keepalive_interval) { - if (seconds_to_next) + if(!session->keepalive_interval) { + if(seconds_to_next) *seconds_to_next = 0; return 0; } - now = time (NULL); + now = time(NULL); - if (session->keepalive_last_sent + session->keepalive_interval <= now) { + if(session->keepalive_last_sent + session->keepalive_interval <= now) { /* Format is "SSH_MSG_GLOBAL_REQUEST || 4-byte len || str || want-reply". 
*/ unsigned char keepalive_data[] = "\x50\x00\x00\x00\x15keepalive@libssh2.orgW"; - size_t len = sizeof (keepalive_data) - 1; + size_t len = sizeof(keepalive_data) - 1; int rc; keepalive_data[len - 1] = @@ -81,16 +81,17 @@ libssh2_keepalive_send (LIBSSH2_SESSION *session, rc = _libssh2_transport_send(session, keepalive_data, len, NULL, 0); /* Silently ignore PACKET_EAGAIN here: if the write buffer is already full, sending another keepalive is not useful. */ - if (rc && rc != LIBSSH2_ERROR_EAGAIN) { + if(rc && rc != LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send keepalive message"); return rc; } session->keepalive_last_sent = now; - if (seconds_to_next) + if(seconds_to_next) *seconds_to_next = session->keepalive_interval; - } else if (seconds_to_next) { + } + else if(seconds_to_next) { *seconds_to_next = (int) (session->keepalive_last_sent - now) + session->keepalive_interval; } diff --git a/vendor/libssh2/src/kex.c b/vendor/libssh2/src/kex.c index 65b722f421..f45d48a2bd 100644 --- a/vendor/libssh2/src/kex.c +++ b/vendor/libssh2/src/kex.c @@ -1,5 +1,5 @@ /* Copyright (c) 2004-2007, Sara Golemon - * Copyright (c) 2010, Daniel Stenberg + * Copyright (c) 2010-2019, Daniel Stenberg * All rights reserved. 
* * Redistribution and use in source and binary forms, @@ -42,113 +42,243 @@ #include "comp.h" #include "mac.h" +#include + +/* define SHA1_DIGEST_LENGTH for the macro below */ +#ifndef SHA1_DIGEST_LENGTH +#define SHA1_DIGEST_LENGTH SHA_DIGEST_LENGTH +#endif + /* TODO: Switch this to an inline and handle alloc() failures */ -/* Helper macro called from kex_method_diffie_hellman_group1_sha1_key_exchange */ -#define LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(value, reqlen, version) \ - { \ - libssh2_sha1_ctx hash; \ - unsigned long len = 0; \ - if (!(value)) { \ - value = LIBSSH2_ALLOC(session, reqlen + SHA_DIGEST_LENGTH); \ - } \ - if (value) \ - while (len < (unsigned long)reqlen) { \ - libssh2_sha1_init(&hash); \ - libssh2_sha1_update(hash, exchange_state->k_value, \ - exchange_state->k_value_len); \ - libssh2_sha1_update(hash, exchange_state->h_sig_comp, \ - SHA_DIGEST_LENGTH); \ - if (len > 0) { \ - libssh2_sha1_update(hash, value, len); \ - } else { \ - libssh2_sha1_update(hash, (version), 1); \ - libssh2_sha1_update(hash, session->session_id, \ - session->session_id_len); \ - } \ - libssh2_sha1_final(hash, (value) + len); \ - len += SHA_DIGEST_LENGTH; \ - } \ - } - - -/* Helper macro called from kex_method_diffie_hellman_group1_sha256_key_exchange */ -#define LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(value, reqlen, version) \ - { \ - libssh2_sha256_ctx hash; \ - unsigned long len = 0; \ - if (!(value)) { \ - value = LIBSSH2_ALLOC(session, reqlen + SHA256_DIGEST_LENGTH); \ - } \ - if (value) \ - while (len < (unsigned long)reqlen) { \ - libssh2_sha256_init(&hash); \ - libssh2_sha256_update(hash, exchange_state->k_value, \ - exchange_state->k_value_len); \ - libssh2_sha256_update(hash, exchange_state->h_sig_comp, \ - SHA256_DIGEST_LENGTH); \ - if (len > 0) { \ - libssh2_sha256_update(hash, value, len); \ - } else { \ - libssh2_sha256_update(hash, (version), 1); \ - libssh2_sha256_update(hash, session->session_id, \ - session->session_id_len); \ - } \ - 
libssh2_sha256_final(hash, (value) + len); \ - len += SHA256_DIGEST_LENGTH; \ - } \ +/* Helper macro called from + kex_method_diffie_hellman_group1_sha1_key_exchange */ + +#define LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(value, reqlen, version) \ + { \ + if(type == LIBSSH2_EC_CURVE_NISTP256) { \ + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, value, reqlen, version); \ + } \ + else if(type == LIBSSH2_EC_CURVE_NISTP384) { \ + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(384, value, reqlen, version); \ + } \ + else if(type == LIBSSH2_EC_CURVE_NISTP521) { \ + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(512, value, reqlen, version); \ + } \ + } \ + + +#define LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(digest_type, value, \ + reqlen, version) \ +{ \ + libssh2_sha##digest_type##_ctx hash; \ + unsigned long len = 0; \ + if(!(value)) { \ + value = LIBSSH2_ALLOC(session, \ + reqlen + SHA##digest_type##_DIGEST_LENGTH); \ + } \ + if(value) \ + while(len < (unsigned long)reqlen) { \ + libssh2_sha##digest_type##_init(&hash); \ + libssh2_sha##digest_type##_update(hash, \ + exchange_state->k_value, \ + exchange_state->k_value_len); \ + libssh2_sha##digest_type##_update(hash, \ + exchange_state->h_sig_comp, \ + SHA##digest_type##_DIGEST_LENGTH); \ + if(len > 0) { \ + libssh2_sha##digest_type##_update(hash, value, len); \ + } \ + else { \ + libssh2_sha##digest_type##_update(hash, (version), 1); \ + libssh2_sha##digest_type##_update(hash, session->session_id,\ + session->session_id_len); \ + } \ + libssh2_sha##digest_type##_final(hash, (value) + len); \ + len += SHA##digest_type##_DIGEST_LENGTH; \ + } \ +} + +/*! + * @note The following are wrapper functions used by diffie_hellman_sha_algo(). 
+ * TODO: Switch backend SHA macros to functions to allow function pointers + * @discussion Ideally these would be function pointers but the backend macros + * don't allow it so we have to wrap them up in helper functions + */ + +static void _libssh2_sha_algo_ctx_init(int sha_algo, void *ctx) +{ + if(sha_algo == 512) { + libssh2_sha512_init((libssh2_sha512_ctx*)ctx); + } + else if(sha_algo == 384) { + libssh2_sha384_init((libssh2_sha384_ctx*)ctx); + } + else if(sha_algo == 256) { + libssh2_sha256_init((libssh2_sha256_ctx*)ctx); + } + else if(sha_algo == 1) { + libssh2_sha1_init((libssh2_sha1_ctx*)ctx); + } + else { + assert(0); + } +} + +static void _libssh2_sha_algo_ctx_update(int sha_algo, void *ctx, + void *data, size_t len) +{ + if(sha_algo == 512) { + libssh2_sha512_ctx *_ctx = (libssh2_sha512_ctx*)ctx; + libssh2_sha512_update(*_ctx, data, len); } + else if(sha_algo == 384) { + libssh2_sha384_ctx *_ctx = (libssh2_sha384_ctx*)ctx; + libssh2_sha384_update(*_ctx, data, len); + } + else if(sha_algo == 256) { + libssh2_sha256_ctx *_ctx = (libssh2_sha256_ctx*)ctx; + libssh2_sha256_update(*_ctx, data, len); + } + else if(sha_algo == 1) { + libssh2_sha1_ctx *_ctx = (libssh2_sha1_ctx*)ctx; + libssh2_sha1_update(*_ctx, data, len); + } + else { +#if LIBSSH2DEBUG + assert(0); +#endif + } +} +static void _libssh2_sha_algo_ctx_final(int sha_algo, void *ctx, + void *hash) +{ + if(sha_algo == 512) { + libssh2_sha512_ctx *_ctx = (libssh2_sha512_ctx*)ctx; + libssh2_sha512_final(*_ctx, hash); + } + else if(sha_algo == 384) { + libssh2_sha384_ctx *_ctx = (libssh2_sha384_ctx*)ctx; + libssh2_sha384_final(*_ctx, hash); + } + else if(sha_algo == 256) { + libssh2_sha256_ctx *_ctx = (libssh2_sha256_ctx*)ctx; + libssh2_sha256_final(*_ctx, hash); + } + else if(sha_algo == 1) { + libssh2_sha1_ctx *_ctx = (libssh2_sha1_ctx*)ctx; + libssh2_sha1_final(*_ctx, hash); + } + else { +#if LIBSSH2DEBUG + assert(0); +#endif + } +} -/* - * diffie_hellman_sha1 - * - * Diffie Hellman Key Exchange, 
Group Agnostic +static void _libssh2_sha_algo_value_hash(int sha_algo, + LIBSSH2_SESSION *session, + kmdhgGPshakex_state_t *exchange_state, + unsigned char **data, size_t data_len, + const unsigned char *version) +{ + if(sha_algo == 512) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(512, *data, data_len, version); + } + else if(sha_algo == 384) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(384, *data, data_len, version); + } + else if(sha_algo == 256) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, *data, data_len, version); + } + else if(sha_algo == 1) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(1, *data, data_len, version); + } + else { +#if LIBSSH2DEBUG + assert(0); +#endif + } +} + + +/*! + * @function diffie_hellman_sha_algo + * @abstract Diffie Hellman Key Exchange, Group Agnostic, + * SHA Algorithm Agnostic + * @result 0 on success, error code on failure */ -static int diffie_hellman_sha1(LIBSSH2_SESSION *session, - _libssh2_bn *g, - _libssh2_bn *p, - int group_order, - unsigned char packet_type_init, - unsigned char packet_type_reply, - unsigned char *midhash, - unsigned long midhash_len, - kmdhgGPshakex_state_t *exchange_state) +static int diffie_hellman_sha_algo(LIBSSH2_SESSION *session, + _libssh2_bn *g, + _libssh2_bn *p, + int group_order, + int sha_algo_value, + void *exchange_hash_ctx, + unsigned char packet_type_init, + unsigned char packet_type_reply, + unsigned char *midhash, + unsigned long midhash_len, + kmdhgGPshakex_state_t *exchange_state) { int ret = 0; int rc; - libssh2_sha1_ctx exchange_hash_ctx; - if (exchange_state->state == libssh2_NB_state_idle) { + int digest_len = 0; + + if(sha_algo_value == 512) + digest_len = SHA512_DIGEST_LENGTH; + else if(sha_algo_value == 384) + digest_len = SHA384_DIGEST_LENGTH; + else if(sha_algo_value == 256) + digest_len = SHA256_DIGEST_LENGTH; + else if(sha_algo_value == 1) + digest_len = SHA1_DIGEST_LENGTH; + else { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "sha algo value is unimplemented"); + goto clean_exit; + } + + 
if(exchange_state->state == libssh2_NB_state_idle) { /* Setup initial values */ exchange_state->e_packet = NULL; exchange_state->s_packet = NULL; exchange_state->k_value = NULL; exchange_state->ctx = _libssh2_bn_ctx_new(); - exchange_state->x = _libssh2_bn_init(); /* Random from client */ + libssh2_dh_init(&exchange_state->x); exchange_state->e = _libssh2_bn_init(); /* g^x mod p */ - exchange_state->f = _libssh2_bn_init_from_bin(); /* g^(Random from server) mod p */ - exchange_state->k = _libssh2_bn_init(); /* The shared secret: f^x mod p */ + exchange_state->f = _libssh2_bn_init_from_bin(); /* g^(Random from + server) mod p */ + exchange_state->k = _libssh2_bn_init(); /* The shared secret: f^x mod + p */ /* Zero the whole thing out */ memset(&exchange_state->req_state, 0, sizeof(packet_require_state_t)); /* Generate x and e */ - _libssh2_bn_rand(exchange_state->x, group_order * 8 - 1, 0, -1); - _libssh2_bn_mod_exp(exchange_state->e, g, exchange_state->x, p, - exchange_state->ctx); + if(_libssh2_bn_bits(p) > LIBSSH2_DH_MAX_MODULUS_BITS) { + ret = _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "dh modulus value is too large"); + goto clean_exit; + } + + rc = libssh2_dh_key_pair(&exchange_state->x, exchange_state->e, g, p, + group_order, exchange_state->ctx); + if(rc) + goto clean_exit; /* Send KEX init */ /* packet_type(1) + String Length(4) + leading 0(1) */ exchange_state->e_packet_len = _libssh2_bn_bytes(exchange_state->e) + 6; - if (_libssh2_bn_bits(exchange_state->e) % 8) { + if(_libssh2_bn_bits(exchange_state->e) % 8) { /* Leading 00 not needed */ exchange_state->e_packet_len--; } exchange_state->e_packet = LIBSSH2_ALLOC(session, exchange_state->e_packet_len); - if (!exchange_state->e_packet) { + if(!exchange_state->e_packet) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Out of memory error"); goto clean_exit; @@ -156,10 +286,11 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, exchange_state->e_packet[0] = packet_type_init; 
_libssh2_htonu32(exchange_state->e_packet + 1, exchange_state->e_packet_len - 5); - if (_libssh2_bn_bits(exchange_state->e) % 8) { + if(_libssh2_bn_bits(exchange_state->e) % 8) { _libssh2_bn_to_bin(exchange_state->e, exchange_state->e_packet + 5); - } else { + } + else { exchange_state->e_packet[5] = 0; _libssh2_bn_to_bin(exchange_state->e, exchange_state->e_packet + 6); @@ -170,13 +301,14 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, exchange_state->state = libssh2_NB_state_created; } - if (exchange_state->state == libssh2_NB_state_created) { + if(exchange_state->state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, exchange_state->e_packet, exchange_state->e_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { ret = _libssh2_error(session, rc, "Unable to send KEX init message"); goto clean_exit; @@ -184,20 +316,22 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, exchange_state->state = libssh2_NB_state_sent; } - if (exchange_state->state == libssh2_NB_state_sent) { - if (session->burn_optimistic_kexinit) { + if(exchange_state->state == libssh2_NB_state_sent) { + if(session->burn_optimistic_kexinit) { /* The first KEX packet to come along will be the guess initially * sent by the server. 
That guess turned out to be wrong so we * need to silently ignore it */ int burn_type; _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Waiting for badly guessed KEX packet (to be ignored)"); + "Waiting for badly guessed KEX packet " + "(to be ignored)"); burn_type = _libssh2_packet_burn(session, &exchange_state->burn_state); - if (burn_type == LIBSSH2_ERROR_EAGAIN) { + if(burn_type == LIBSSH2_ERROR_EAGAIN) { return burn_type; - } else if (burn_type <= 0) { + } + else if(burn_type <= 0) { /* Failed to receive a packet */ ret = burn_type; goto clean_exit; @@ -212,47 +346,53 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, exchange_state->state = libssh2_NB_state_sent1; } - if (exchange_state->state == libssh2_NB_state_sent1) { + if(exchange_state->state == libssh2_NB_state_sent1) { /* Wait for KEX reply */ + struct string_buf buf; + size_t host_key_len; + rc = _libssh2_packet_require(session, packet_type_reply, &exchange_state->s_packet, &exchange_state->s_packet_len, 0, NULL, 0, &exchange_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } - if (rc) { + if(rc) { ret = _libssh2_error(session, LIBSSH2_ERROR_TIMEOUT, "Timed out waiting for KEX reply"); goto clean_exit; } /* Parse KEXDH_REPLY */ - exchange_state->s = exchange_state->s_packet + 1; + if(exchange_state->s_packet_len < 5) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet length"); + goto clean_exit; + } - session->server_hostkey_len = _libssh2_ntohu32(exchange_state->s); - exchange_state->s += 4; + buf.data = exchange_state->s_packet; + buf.len = exchange_state->s_packet_len; + buf.dataptr = buf.data; + buf.dataptr++; /* advance past type */ - if (session->server_hostkey) + if(session->server_hostkey) LIBSSH2_FREE(session, session->server_hostkey); - session->server_hostkey = - LIBSSH2_ALLOC(session, session->server_hostkey_len); - if (!session->server_hostkey) { + if(_libssh2_copy_string(session, &buf, 
&(session->server_hostkey), + &host_key_len)) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for a copy " - "of the host key"); + "Could not copy host key"); goto clean_exit; } - memcpy(session->server_hostkey, exchange_state->s, - session->server_hostkey_len); - exchange_state->s += session->server_hostkey_len; + + session->server_hostkey_len = (uint32_t)host_key_len; #if LIBSSH2_MD5 { libssh2_md5_ctx fingerprint_ctx; - if (libssh2_md5_init(&fingerprint_ctx)) { + if(libssh2_md5_init(&fingerprint_ctx)) { libssh2_md5_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_md5_final(fingerprint_ctx, @@ -280,7 +420,7 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, { libssh2_sha1_ctx fingerprint_ctx; - if (libssh2_sha1_init(&fingerprint_ctx)) { + if(libssh2_sha1_init(&fingerprint_ctx)) { libssh2_sha1_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_sha1_final(fingerprint_ctx, @@ -305,7 +445,38 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, } #endif /* LIBSSH2DEBUG */ - if (session->hostkey->init(session, session->server_hostkey, + { + libssh2_sha256_ctx fingerprint_ctx; + + if(libssh2_sha256_init(&fingerprint_ctx)) { + libssh2_sha256_update(fingerprint_ctx, session->server_hostkey, + session->server_hostkey_len); + libssh2_sha256_final(fingerprint_ctx, + session->server_hostkey_sha256); + session->server_hostkey_sha256_valid = TRUE; + } + else { + session->server_hostkey_sha256_valid = FALSE; + } + } +#ifdef LIBSSH2DEBUG + { + char *base64Fingerprint = NULL; + _libssh2_base64_encode(session, + (const char *) + session->server_hostkey_sha256, + SHA256_DIGEST_LENGTH, &base64Fingerprint); + if(base64Fingerprint != NULL) { + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server's SHA256 Fingerprint: %s", + base64Fingerprint); + LIBSSH2_FREE(session, base64Fingerprint); + } + } +#endif /* LIBSSH2DEBUG */ + + + if(session->hostkey->init(session, 
session->server_hostkey, session->server_hostkey_len, &session->server_hostkey_abstract)) { ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, @@ -313,95 +484,104 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, goto clean_exit; } - exchange_state->f_value_len = _libssh2_ntohu32(exchange_state->s); - exchange_state->s += 4; - exchange_state->f_value = exchange_state->s; - exchange_state->s += exchange_state->f_value_len; + if(_libssh2_get_string(&buf, &(exchange_state->f_value), + &(exchange_state->f_value_len))) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Unable to get f value"); + goto clean_exit; + } + _libssh2_bn_from_bin(exchange_state->f, exchange_state->f_value_len, exchange_state->f_value); - exchange_state->h_sig_len = _libssh2_ntohu32(exchange_state->s); - exchange_state->s += 4; - exchange_state->h_sig = exchange_state->s; + if(_libssh2_get_string(&buf, &(exchange_state->h_sig), + &(exchange_state->h_sig_len))) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Unable to get h sig"); + goto clean_exit; + } /* Compute the shared secret */ - _libssh2_bn_mod_exp(exchange_state->k, exchange_state->f, - exchange_state->x, p, exchange_state->ctx); + libssh2_dh_secret(&exchange_state->x, exchange_state->k, + exchange_state->f, p, exchange_state->ctx); exchange_state->k_value_len = _libssh2_bn_bytes(exchange_state->k) + 5; - if (_libssh2_bn_bits(exchange_state->k) % 8) { + if(_libssh2_bn_bits(exchange_state->k) % 8) { /* don't need leading 00 */ exchange_state->k_value_len--; } exchange_state->k_value = LIBSSH2_ALLOC(session, exchange_state->k_value_len); - if (!exchange_state->k_value) { + if(!exchange_state->k_value) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate buffer for K"); goto clean_exit; } _libssh2_htonu32(exchange_state->k_value, exchange_state->k_value_len - 4); - if (_libssh2_bn_bits(exchange_state->k) % 8) { + if(_libssh2_bn_bits(exchange_state->k) % 8) { 
_libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 4); - } else { + } + else { exchange_state->k_value[4] = 0; _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 5); } - exchange_state->exchange_hash = (void*)&exchange_hash_ctx; - libssh2_sha1_init(&exchange_hash_ctx); + exchange_state->exchange_hash = (void *)&exchange_hash_ctx; + _libssh2_sha_algo_ctx_init(sha_algo_value, exchange_hash_ctx); - if (session->local.banner) { + if(session->local.banner) { _libssh2_htonu32(exchange_state->h_sig_comp, strlen((char *) session->local.banner) - 2); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - (char *) session->local.banner, + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + session->local.banner, strlen((char *) session->local.banner) - 2); - } else { + } + else { _libssh2_htonu32(exchange_state->h_sig_comp, sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - LIBSSH2_SSH_DEFAULT_BANNER, - sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + (unsigned char *) + LIBSSH2_SSH_DEFAULT_BANNER, + sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); } _libssh2_htonu32(exchange_state->h_sig_comp, strlen((char *) session->remote.banner)); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - session->remote.banner, - strlen((char *) session->remote.banner)); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + session->remote.banner, + strlen((char 
*) session->remote.banner)); _libssh2_htonu32(exchange_state->h_sig_comp, session->local.kexinit_len); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - session->local.kexinit, - session->local.kexinit_len); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + session->local.kexinit, + session->local.kexinit_len); _libssh2_htonu32(exchange_state->h_sig_comp, session->remote.kexinit_len); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - session->remote.kexinit, - session->remote.kexinit_len); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + session->remote.kexinit, + session->remote.kexinit_len); _libssh2_htonu32(exchange_state->h_sig_comp, session->server_hostkey_len); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - session->server_hostkey, - session->server_hostkey_len); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + session->server_hostkey, + session->server_hostkey_len); - if (packet_type_init == SSH_MSG_KEX_DH_GEX_INIT) { + if(packet_type_init == SSH_MSG_KEX_DH_GEX_INIT) { /* diffie-hellman-group-exchange hashes additional fields */ #ifdef LIBSSH2_DH_GEX_NEW _libssh2_htonu32(exchange_state->h_sig_comp, @@ -410,44 +590,44 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, LIBSSH2_DH_GEX_OPTGROUP); _libssh2_htonu32(exchange_state->h_sig_comp + 8, LIBSSH2_DH_GEX_MAXGROUP); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 12); + _libssh2_sha_algo_ctx_update(sha_algo_value, 
exchange_hash_ctx, + exchange_state->h_sig_comp, 12); #else _libssh2_htonu32(exchange_state->h_sig_comp, LIBSSH2_DH_GEX_OPTGROUP); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); #endif } - if (midhash) { - libssh2_sha1_update(exchange_hash_ctx, midhash, - midhash_len); + if(midhash) { + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + midhash, midhash_len); } - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->e_packet + 1, - exchange_state->e_packet_len - 1); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->e_packet + 1, + exchange_state->e_packet_len - 1); _libssh2_htonu32(exchange_state->h_sig_comp, exchange_state->f_value_len); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->f_value, - exchange_state->f_value_len); - - libssh2_sha1_update(exchange_hash_ctx, - exchange_state->k_value, - exchange_state->k_value_len); - - libssh2_sha1_final(exchange_hash_ctx, - exchange_state->h_sig_comp); - - if (session->hostkey-> - sig_verify(session, exchange_state->h_sig, - exchange_state->h_sig_len, exchange_state->h_sig_comp, - 20, &session->server_hostkey_abstract)) { + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp, 4); + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->f_value, + exchange_state->f_value_len); + + _libssh2_sha_algo_ctx_update(sha_algo_value, exchange_hash_ctx, + exchange_state->k_value, + exchange_state->k_value_len); + + _libssh2_sha_algo_ctx_final(sha_algo_value, exchange_hash_ctx, + exchange_state->h_sig_comp); + + if(session->hostkey-> + sig_verify(session, exchange_state->h_sig, + exchange_state->h_sig_len, exchange_state->h_sig_comp, + digest_len, &session->server_hostkey_abstract)) { ret = 
_libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_SIGN, "Unable to verify hostkey signature"); goto clean_exit; @@ -459,26 +639,29 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, exchange_state->state = libssh2_NB_state_sent2; } - if (exchange_state->state == libssh2_NB_state_sent2) { + if(exchange_state->state == libssh2_NB_state_sent2) { rc = _libssh2_transport_send(session, &exchange_state->c, 1, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { - ret = _libssh2_error(session, rc, "Unable to send NEWKEYS message"); + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send NEWKEYS message"); goto clean_exit; } exchange_state->state = libssh2_NB_state_sent3; } - if (exchange_state->state == libssh2_NB_state_sent3) { + if(exchange_state->state == libssh2_NB_state_sent3) { rc = _libssh2_packet_require(session, SSH_MSG_NEWKEYS, &exchange_state->tmp, &exchange_state->tmp_len, 0, NULL, 0, &exchange_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { ret = _libssh2_error(session, rc, "Timed out waiting for NEWKEYS"); goto clean_exit; } @@ -491,46 +674,52 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, for this packet type anyway */ LIBSSH2_FREE(session, exchange_state->tmp); - if (!session->session_id) { - session->session_id = LIBSSH2_ALLOC(session, SHA_DIGEST_LENGTH); - if (!session->session_id) { + if(!session->session_id) { + session->session_id = LIBSSH2_ALLOC(session, digest_len); + if(!session->session_id) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate buffer for SHA digest"); + "Unable to allocate buffer for " + "SHA digest"); goto clean_exit; } memcpy(session->session_id, exchange_state->h_sig_comp, - SHA_DIGEST_LENGTH); - session->session_id_len = SHA_DIGEST_LENGTH; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, "session_id calculated"); + digest_len); 
+ session->session_id_len = digest_len; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "session_id calculated"); } /* Cleanup any existing cipher */ - if (session->local.crypt->dtor) { + if(session->local.crypt->dtor) { session->local.crypt->dtor(session, &session->local.crypt_abstract); } /* Calculate IV/Secret/Key for each direction */ - if (session->local.crypt->init) { + if(session->local.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(iv, - session->local.crypt-> - iv_len, "A"); - if (!iv) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &iv, + session->local.crypt->iv_len, + (const unsigned char *)"A"); + + if(!iv) { ret = -1; goto clean_exit; } - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(secret, - session->local.crypt-> - secret_len, "C"); - if (!secret) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &secret, + session->local.crypt->secret_len, + (const unsigned char *)"C"); + + if(!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - if (session->local.crypt-> + if(session->local.crypt-> init(session, session->local.crypt, iv, &free_iv, secret, &free_secret, 1, &session->local.crypt_abstract)) { LIBSSH2_FREE(session, iv); @@ -539,45 +728,48 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, goto clean_exit; } - if (free_iv) { - memset(iv, 0, session->local.crypt->iv_len); + if(free_iv) { + _libssh2_explicit_zero(iv, session->local.crypt->iv_len); LIBSSH2_FREE(session, iv); } - if (free_secret) { - memset(secret, 0, session->local.crypt->secret_len); + if(free_secret) { + _libssh2_explicit_zero(secret, + session->local.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server IV and Key calculated"); - if (session->remote.crypt->dtor) { + if(session->remote.crypt->dtor) { /* Cleanup any existing cipher */ 
session->remote.crypt->dtor(session, &session->remote.crypt_abstract); } - if (session->remote.crypt->init) { + if(session->remote.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(iv, - session->remote.crypt-> - iv_len, "B"); - if (!iv) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &iv, + session->remote.crypt->iv_len, + (const unsigned char *)"B"); + if(!iv) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(secret, - session->remote.crypt-> - secret_len, "D"); - if (!secret) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &secret, + session->remote.crypt->secret_len, + (const unsigned char *)"D"); + if(!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - if (session->remote.crypt-> + if(session->remote.crypt-> init(session, session->remote.crypt, iv, &free_iv, secret, &free_secret, 0, &session->remote.crypt_abstract)) { LIBSSH2_FREE(session, iv); @@ -586,65 +778,68 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, goto clean_exit; } - if (free_iv) { - memset(iv, 0, session->remote.crypt->iv_len); + if(free_iv) { + _libssh2_explicit_zero(iv, session->remote.crypt->iv_len); LIBSSH2_FREE(session, iv); } - if (free_secret) { - memset(secret, 0, session->remote.crypt->secret_len); + if(free_secret) { + _libssh2_explicit_zero(secret, + session->remote.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server to Client IV and Key calculated"); - if (session->local.mac->dtor) { + if(session->local.mac->dtor) { session->local.mac->dtor(session, &session->local.mac_abstract); } - if (session->local.mac->init) { + if(session->local.mac->init) { unsigned char *key = NULL; int free_key = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(key, - session->local.mac-> - key_len, "E"); - if 
(!key) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &key, + session->local.mac->key_len, + (const unsigned char *)"E"); + if(!key) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } session->local.mac->init(session, key, &free_key, &session->local.mac_abstract); - if (free_key) { - memset(key, 0, session->local.mac->key_len); + if(free_key) { + _libssh2_explicit_zero(key, session->local.mac->key_len); LIBSSH2_FREE(session, key); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server HMAC Key calculated"); - if (session->remote.mac->dtor) { + if(session->remote.mac->dtor) { session->remote.mac->dtor(session, &session->remote.mac_abstract); } - if (session->remote.mac->init) { + if(session->remote.mac->init) { unsigned char *key = NULL; int free_key = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(key, - session->remote.mac-> - key_len, "F"); - if (!key) { + _libssh2_sha_algo_value_hash(sha_algo_value, session, + exchange_state, &key, + session->remote.mac->key_len, + (const unsigned char *)"F"); + if(!key) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } session->remote.mac->init(session, key, &free_key, &session->remote.mac_abstract); - if (free_key) { - memset(key, 0, session->remote.mac->key_len); + if(free_key) { + _libssh2_explicit_zero(key, session->remote.mac->key_len); LIBSSH2_FREE(session, key); } } @@ -654,13 +849,13 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, /* Initialize compression for each direction */ /* Cleanup any existing compression */ - if (session->local.comp && session->local.comp->dtor) { + if(session->local.comp && session->local.comp->dtor) { session->local.comp->dtor(session, 1, &session->local.comp_abstract); } - if (session->local.comp && session->local.comp->init) { - if (session->local.comp->init(session, 1, + if(session->local.comp && session->local.comp->init) { + if(session->local.comp->init(session, 1, &session->local.comp_abstract)) { ret = 
LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; @@ -669,13 +864,13 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server compression initialized"); - if (session->remote.comp && session->remote.comp->dtor) { + if(session->remote.comp && session->remote.comp->dtor) { session->remote.comp->dtor(session, 0, &session->remote.comp_abstract); } - if (session->remote.comp && session->remote.comp->init) { - if (session->remote.comp->init(session, 0, + if(session->remote.comp && session->remote.comp->init) { + if(session->remote.comp->init(session, 0, &session->remote.comp_abstract)) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; @@ -687,8 +882,7 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, } clean_exit: - _libssh2_bn_free(exchange_state->x); - exchange_state->x = NULL; + libssh2_dh_dtor(&exchange_state->x); _libssh2_bn_free(exchange_state->e); exchange_state->e = NULL; _libssh2_bn_free(exchange_state->f); @@ -698,17 +892,17 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, _libssh2_bn_ctx_free(exchange_state->ctx); exchange_state->ctx = NULL; - if (exchange_state->e_packet) { + if(exchange_state->e_packet) { LIBSSH2_FREE(session, exchange_state->e_packet); exchange_state->e_packet = NULL; } - if (exchange_state->s_packet) { + if(exchange_state->s_packet) { LIBSSH2_FREE(session, exchange_state->s_packet); exchange_state->s_packet = NULL; } - if (exchange_state->k_value) { + if(exchange_state->k_value) { LIBSSH2_FREE(session, exchange_state->k_value); exchange_state->k_value = NULL; } @@ -719,160 +913,1495 @@ static int diffie_hellman_sha1(LIBSSH2_SESSION *session, } -/* - * diffie_hellman_sha256 - * - * Diffie Hellman Key Exchange, Group Agnostic + +/* kex_method_diffie_hellman_group1_sha1_key_exchange + * Diffie-Hellman Group1 (Actually Group2) Key Exchange using SHA1 */ -static int diffie_hellman_sha256(LIBSSH2_SESSION *session, - _libssh2_bn *g, - _libssh2_bn *p, - int 
group_order, - unsigned char packet_type_init, - unsigned char packet_type_reply, - unsigned char *midhash, - unsigned long midhash_len, - kmdhgGPshakex_state_t *exchange_state) +static int +kex_method_diffie_hellman_group1_sha1_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state) { - int ret = 0; - int rc; - libssh2_sha256_ctx exchange_hash_ctx; - - if (exchange_state->state == libssh2_NB_state_idle) { - /* Setup initial values */ - exchange_state->e_packet = NULL; - exchange_state->s_packet = NULL; - exchange_state->k_value = NULL; - exchange_state->ctx = _libssh2_bn_ctx_new(); - exchange_state->x = _libssh2_bn_init(); /* Random from client */ - exchange_state->e = _libssh2_bn_init(); /* g^x mod p */ - exchange_state->f = _libssh2_bn_init_from_bin(); /* g^(Random from server) mod p */ - exchange_state->k = _libssh2_bn_init(); /* The shared secret: f^x mod p */ + static const unsigned char p_value[128] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34, + 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, + 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, + 0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22, + 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, + 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, + 0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37, + 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, + 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, + 0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B, + 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, + 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, + 0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6, + 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE6, 0x53, 0x81, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; - /* Zero the whole thing out */ - memset(&exchange_state->req_state, 0, sizeof(packet_require_state_t)); + int ret; + libssh2_sha1_ctx exchange_hash_ctx; - /* Generate x and e */ - _libssh2_bn_rand(exchange_state->x, group_order * 8 - 1, 0, -1); 
- _libssh2_bn_mod_exp(exchange_state->e, g, exchange_state->x, p, - exchange_state->ctx); + if(key_state->state == libssh2_NB_state_idle) { + /* g == 2 */ + key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value + (p_value) */ + key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ - /* Send KEX init */ - /* packet_type(1) + String Length(4) + leading 0(1) */ - exchange_state->e_packet_len = - _libssh2_bn_bytes(exchange_state->e) + 6; - if (_libssh2_bn_bits(exchange_state->e) % 8) { - /* Leading 00 not needed */ - exchange_state->e_packet_len--; - } + /* Initialize P and G */ + _libssh2_bn_set_word(key_state->g, 2); + _libssh2_bn_from_bin(key_state->p, 128, p_value); - exchange_state->e_packet = - LIBSSH2_ALLOC(session, exchange_state->e_packet_len); - if (!exchange_state->e_packet) { - ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Out of memory error"); - goto clean_exit; - } - exchange_state->e_packet[0] = packet_type_init; - _libssh2_htonu32(exchange_state->e_packet + 1, - exchange_state->e_packet_len - 5); - if (_libssh2_bn_bits(exchange_state->e) % 8) { - _libssh2_bn_to_bin(exchange_state->e, - exchange_state->e_packet + 5); - } else { - exchange_state->e_packet[5] = 0; - _libssh2_bn_to_bin(exchange_state->e, - exchange_state->e_packet + 6); - } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group1 Key Exchange"); - _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Sending KEX packet %d", - (int) packet_type_init); - exchange_state->state = libssh2_NB_state_created; + key_state->state = libssh2_NB_state_created; } - if (exchange_state->state == libssh2_NB_state_created) { - rc = _libssh2_transport_send(session, exchange_state->e_packet, - exchange_state->e_packet_len, - NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return rc; - } else if (rc) { - ret = _libssh2_error(session, rc, - "Unable to send KEX init message"); - goto clean_exit; - } - exchange_state->state = libssh2_NB_state_sent; + ret = 
diffie_hellman_sha_algo(session, key_state->g, key_state->p, 128, 1, + (void *)&exchange_hash_ctx, + SSH_MSG_KEXDH_INIT, SSH_MSG_KEXDH_REPLY, + NULL, 0, &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; } - if (exchange_state->state == libssh2_NB_state_sent) { - if (session->burn_optimistic_kexinit) { - /* The first KEX packet to come along will be the guess initially - * sent by the server. That guess turned out to be wrong so we - * need to silently ignore it */ - int burn_type; - - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Waiting for badly guessed KEX packet (to be ignored)"); - burn_type = - _libssh2_packet_burn(session, &exchange_state->burn_state); - if (burn_type == LIBSSH2_ERROR_EAGAIN) { - return burn_type; - } else if (burn_type <= 0) { - /* Failed to receive a packet */ - ret = burn_type; - goto clean_exit; - } - session->burn_optimistic_kexinit = 0; + _libssh2_bn_free(key_state->p); + key_state->p = NULL; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + key_state->state = libssh2_NB_state_idle; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Burnt packet of type: %02x", - (unsigned int) burn_type); - } + return ret; +} - exchange_state->state = libssh2_NB_state_sent1; - } - if (exchange_state->state == libssh2_NB_state_sent1) { - /* Wait for KEX reply */ - rc = _libssh2_packet_require(session, packet_type_reply, - &exchange_state->s_packet, - &exchange_state->s_packet_len, 0, NULL, - 0, &exchange_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return rc; - } - if (rc) { - ret = _libssh2_error(session, LIBSSH2_ERROR_TIMEOUT, - "Timed out waiting for KEX reply"); - goto clean_exit; - } +/* kex_method_diffie_hellman_group14_key_exchange + * Diffie-Hellman Group14 Key Exchange with hash function callback + */ +typedef int (*diffie_hellman_hash_func_t)(LIBSSH2_SESSION *, + _libssh2_bn *, + _libssh2_bn *, + int, + int, + void *, + unsigned char, + unsigned char, + unsigned char *, + unsigned long, + 
kmdhgGPshakex_state_t *); +static int +kex_method_diffie_hellman_group14_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state, + int sha_algo_value, + void *exchange_hash_ctx, + diffie_hellman_hash_func_t + hashfunc) +{ + static const unsigned char p_value[256] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34, + 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, + 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, + 0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22, + 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, + 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, + 0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37, + 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, + 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, + 0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B, + 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, + 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, + 0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6, + 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D, + 0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05, + 0x98, 0xDA, 0x48, 0x36, 0x1C, 0x55, 0xD3, 0x9A, + 0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F, + 0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96, + 0x1C, 0x62, 0xF3, 0x56, 0x20, 0x85, 0x52, 0xBB, + 0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D, + 0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04, + 0xF1, 0x74, 0x6C, 0x08, 0xCA, 0x18, 0x21, 0x7C, + 0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B, + 0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03, + 0x9B, 0x27, 0x83, 0xA2, 0xEC, 0x07, 0xA2, 0x8F, + 0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9, + 0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18, + 0x39, 0x95, 0x49, 0x7C, 0xEA, 0x95, 0x6A, 0xE5, + 0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10, + 0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAC, 0xAA, 0x68, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + int ret; - /* Parse KEXDH_REPLY */ - exchange_state->s = exchange_state->s_packet + 1; + 
if(key_state->state == libssh2_NB_state_idle) { + key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value + (p_value) */ + key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ - session->server_hostkey_len = _libssh2_ntohu32(exchange_state->s); - exchange_state->s += 4; + /* g == 2 */ + /* Initialize P and G */ + _libssh2_bn_set_word(key_state->g, 2); + _libssh2_bn_from_bin(key_state->p, 256, p_value); - if (session->server_hostkey) - LIBSSH2_FREE(session, session->server_hostkey); + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group14 Key Exchange"); - session->server_hostkey = - LIBSSH2_ALLOC(session, session->server_hostkey_len); - if (!session->server_hostkey) { - ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + key_state->state = libssh2_NB_state_created; + } + ret = hashfunc(session, key_state->g, key_state->p, + 256, sha_algo_value, exchange_hash_ctx, SSH_MSG_KEXDH_INIT, + SSH_MSG_KEXDH_REPLY, NULL, 0, &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + key_state->state = libssh2_NB_state_idle; + _libssh2_bn_free(key_state->p); + key_state->p = NULL; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + + return ret; +} + + + +/* kex_method_diffie_hellman_group14_sha1_key_exchange + * Diffie-Hellman Group14 Key Exchange using SHA1 + */ +static int +kex_method_diffie_hellman_group14_sha1_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state) +{ + libssh2_sha1_ctx ctx; + return kex_method_diffie_hellman_group14_key_exchange(session, + key_state, 1, + &ctx, + diffie_hellman_sha_algo); +} + + + +/* kex_method_diffie_hellman_group14_sha256_key_exchange + * Diffie-Hellman Group14 Key Exchange using SHA256 + */ +static int +kex_method_diffie_hellman_group14_sha256_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state) +{ + libssh2_sha256_ctx ctx; + return kex_method_diffie_hellman_group14_key_exchange(session, + key_state, 
256, + &ctx, + diffie_hellman_sha_algo); +} + +/* kex_method_diffie_hellman_group16_sha512_key_exchange +* Diffie-Hellman Group16 Key Exchange using SHA512 +*/ +static int +kex_method_diffie_hellman_group16_sha512_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state) + +{ + static const unsigned char p_value[512] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC9, 0x0F, 0xDA, 0xA2, + 0x21, 0x68, 0xC2, 0x34, 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, + 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, 0x02, 0x0B, 0xBE, 0xA6, + 0x3B, 0x13, 0x9B, 0x22, 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, + 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, 0x30, 0x2B, 0x0A, 0x6D, + 0xF2, 0x5F, 0x14, 0x37, 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, + 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, 0xF4, 0x4C, 0x42, 0xE9, + 0xA6, 0x37, 0xED, 0x6B, 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, + 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, 0xAE, 0x9F, 0x24, 0x11, + 0x7C, 0x4B, 0x1F, 0xE6, 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D, + 0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05, 0x98, 0xDA, 0x48, 0x36, + 0x1C, 0x55, 0xD3, 0x9A, 0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F, + 0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96, 0x1C, 0x62, 0xF3, 0x56, + 0x20, 0x85, 0x52, 0xBB, 0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D, + 0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04, 0xF1, 0x74, 0x6C, 0x08, + 0xCA, 0x18, 0x21, 0x7C, 0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B, + 0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03, 0x9B, 0x27, 0x83, 0xA2, + 0xEC, 0x07, 0xA2, 0x8F, 0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9, + 0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18, 0x39, 0x95, 0x49, 0x7C, + 0xEA, 0x95, 0x6A, 0xE5, 0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10, + 0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAA, 0xC4, 0x2D, 0xAD, 0x33, 0x17, 0x0D, + 0x04, 0x50, 0x7A, 0x33, 0xA8, 0x55, 0x21, 0xAB, 0xDF, 0x1C, 0xBA, 0x64, + 0xEC, 0xFB, 0x85, 0x04, 
0x58, 0xDB, 0xEF, 0x0A, 0x8A, 0xEA, 0x71, 0x57, + 0x5D, 0x06, 0x0C, 0x7D, 0xB3, 0x97, 0x0F, 0x85, 0xA6, 0xE1, 0xE4, 0xC7, + 0xAB, 0xF5, 0xAE, 0x8C, 0xDB, 0x09, 0x33, 0xD7, 0x1E, 0x8C, 0x94, 0xE0, + 0x4A, 0x25, 0x61, 0x9D, 0xCE, 0xE3, 0xD2, 0x26, 0x1A, 0xD2, 0xEE, 0x6B, + 0xF1, 0x2F, 0xFA, 0x06, 0xD9, 0x8A, 0x08, 0x64, 0xD8, 0x76, 0x02, 0x73, + 0x3E, 0xC8, 0x6A, 0x64, 0x52, 0x1F, 0x2B, 0x18, 0x17, 0x7B, 0x20, 0x0C, + 0xBB, 0xE1, 0x17, 0x57, 0x7A, 0x61, 0x5D, 0x6C, 0x77, 0x09, 0x88, 0xC0, + 0xBA, 0xD9, 0x46, 0xE2, 0x08, 0xE2, 0x4F, 0xA0, 0x74, 0xE5, 0xAB, 0x31, + 0x43, 0xDB, 0x5B, 0xFC, 0xE0, 0xFD, 0x10, 0x8E, 0x4B, 0x82, 0xD1, 0x20, + 0xA9, 0x21, 0x08, 0x01, 0x1A, 0x72, 0x3C, 0x12, 0xA7, 0x87, 0xE6, 0xD7, + 0x88, 0x71, 0x9A, 0x10, 0xBD, 0xBA, 0x5B, 0x26, 0x99, 0xC3, 0x27, 0x18, + 0x6A, 0xF4, 0xE2, 0x3C, 0x1A, 0x94, 0x68, 0x34, 0xB6, 0x15, 0x0B, 0xDA, + 0x25, 0x83, 0xE9, 0xCA, 0x2A, 0xD4, 0x4C, 0xE8, 0xDB, 0xBB, 0xC2, 0xDB, + 0x04, 0xDE, 0x8E, 0xF9, 0x2E, 0x8E, 0xFC, 0x14, 0x1F, 0xBE, 0xCA, 0xA6, + 0x28, 0x7C, 0x59, 0x47, 0x4E, 0x6B, 0xC0, 0x5D, 0x99, 0xB2, 0x96, 0x4F, + 0xA0, 0x90, 0xC3, 0xA2, 0x23, 0x3B, 0xA1, 0x86, 0x51, 0x5B, 0xE7, 0xED, + 0x1F, 0x61, 0x29, 0x70, 0xCE, 0xE2, 0xD7, 0xAF, 0xB8, 0x1B, 0xDD, 0x76, + 0x21, 0x70, 0x48, 0x1C, 0xD0, 0x06, 0x91, 0x27, 0xD5, 0xB0, 0x5A, 0xA9, + 0x93, 0xB4, 0xEA, 0x98, 0x8D, 0x8F, 0xDD, 0xC1, 0x86, 0xFF, 0xB7, 0xDC, + 0x90, 0xA6, 0xC0, 0x8F, 0x4D, 0xF4, 0x35, 0xC9, 0x34, 0x06, 0x31, 0x99, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + int ret; + libssh2_sha512_ctx exchange_hash_ctx; + + if(key_state->state == libssh2_NB_state_idle) { + key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value + (p_value) */ + key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ + + /* g == 2 */ + /* Initialize P and G */ + _libssh2_bn_set_word(key_state->g, 2); + _libssh2_bn_from_bin(key_state->p, 512, p_value); + + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group16 Key Exchange"); + 
+ key_state->state = libssh2_NB_state_created; + } + + ret = diffie_hellman_sha_algo(session, key_state->g, key_state->p, 512, + 512, (void *)&exchange_hash_ctx, + SSH_MSG_KEXDH_INIT, SSH_MSG_KEXDH_REPLY, + NULL, 0, &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + key_state->state = libssh2_NB_state_idle; + _libssh2_bn_free(key_state->p); + key_state->p = NULL; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + + return ret; +} + +/* kex_method_diffie_hellman_group16_sha512_key_exchange +* Diffie-Hellman Group18 Key Exchange using SHA512 +*/ +static int +kex_method_diffie_hellman_group18_sha512_key_exchange(LIBSSH2_SESSION *session, + key_exchange_state_low_t + * key_state) + +{ + static const unsigned char p_value[1024] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC9, 0x0F, 0xDA, 0xA2, + 0x21, 0x68, 0xC2, 0x34, 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, + 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, 0x02, 0x0B, 0xBE, 0xA6, + 0x3B, 0x13, 0x9B, 0x22, 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, + 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, 0x30, 0x2B, 0x0A, 0x6D, + 0xF2, 0x5F, 0x14, 0x37, 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, + 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, 0xF4, 0x4C, 0x42, 0xE9, + 0xA6, 0x37, 0xED, 0x6B, 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, + 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, 0xAE, 0x9F, 0x24, 0x11, + 0x7C, 0x4B, 0x1F, 0xE6, 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D, + 0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05, 0x98, 0xDA, 0x48, 0x36, + 0x1C, 0x55, 0xD3, 0x9A, 0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F, + 0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96, 0x1C, 0x62, 0xF3, 0x56, + 0x20, 0x85, 0x52, 0xBB, 0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D, + 0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04, 0xF1, 0x74, 0x6C, 0x08, + 0xCA, 0x18, 0x21, 0x7C, 0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B, + 0xE3, 0x9E, 0x77, 0x2C, 0x18, 
0x0E, 0x86, 0x03, 0x9B, 0x27, 0x83, 0xA2, + 0xEC, 0x07, 0xA2, 0x8F, 0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9, + 0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18, 0x39, 0x95, 0x49, 0x7C, + 0xEA, 0x95, 0x6A, 0xE5, 0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10, + 0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAA, 0xC4, 0x2D, 0xAD, 0x33, 0x17, 0x0D, + 0x04, 0x50, 0x7A, 0x33, 0xA8, 0x55, 0x21, 0xAB, 0xDF, 0x1C, 0xBA, 0x64, + 0xEC, 0xFB, 0x85, 0x04, 0x58, 0xDB, 0xEF, 0x0A, 0x8A, 0xEA, 0x71, 0x57, + 0x5D, 0x06, 0x0C, 0x7D, 0xB3, 0x97, 0x0F, 0x85, 0xA6, 0xE1, 0xE4, 0xC7, + 0xAB, 0xF5, 0xAE, 0x8C, 0xDB, 0x09, 0x33, 0xD7, 0x1E, 0x8C, 0x94, 0xE0, + 0x4A, 0x25, 0x61, 0x9D, 0xCE, 0xE3, 0xD2, 0x26, 0x1A, 0xD2, 0xEE, 0x6B, + 0xF1, 0x2F, 0xFA, 0x06, 0xD9, 0x8A, 0x08, 0x64, 0xD8, 0x76, 0x02, 0x73, + 0x3E, 0xC8, 0x6A, 0x64, 0x52, 0x1F, 0x2B, 0x18, 0x17, 0x7B, 0x20, 0x0C, + 0xBB, 0xE1, 0x17, 0x57, 0x7A, 0x61, 0x5D, 0x6C, 0x77, 0x09, 0x88, 0xC0, + 0xBA, 0xD9, 0x46, 0xE2, 0x08, 0xE2, 0x4F, 0xA0, 0x74, 0xE5, 0xAB, 0x31, + 0x43, 0xDB, 0x5B, 0xFC, 0xE0, 0xFD, 0x10, 0x8E, 0x4B, 0x82, 0xD1, 0x20, + 0xA9, 0x21, 0x08, 0x01, 0x1A, 0x72, 0x3C, 0x12, 0xA7, 0x87, 0xE6, 0xD7, + 0x88, 0x71, 0x9A, 0x10, 0xBD, 0xBA, 0x5B, 0x26, 0x99, 0xC3, 0x27, 0x18, + 0x6A, 0xF4, 0xE2, 0x3C, 0x1A, 0x94, 0x68, 0x34, 0xB6, 0x15, 0x0B, 0xDA, + 0x25, 0x83, 0xE9, 0xCA, 0x2A, 0xD4, 0x4C, 0xE8, 0xDB, 0xBB, 0xC2, 0xDB, + 0x04, 0xDE, 0x8E, 0xF9, 0x2E, 0x8E, 0xFC, 0x14, 0x1F, 0xBE, 0xCA, 0xA6, + 0x28, 0x7C, 0x59, 0x47, 0x4E, 0x6B, 0xC0, 0x5D, 0x99, 0xB2, 0x96, 0x4F, + 0xA0, 0x90, 0xC3, 0xA2, 0x23, 0x3B, 0xA1, 0x86, 0x51, 0x5B, 0xE7, 0xED, + 0x1F, 0x61, 0x29, 0x70, 0xCE, 0xE2, 0xD7, 0xAF, 0xB8, 0x1B, 0xDD, 0x76, + 0x21, 0x70, 0x48, 0x1C, 0xD0, 0x06, 0x91, 0x27, 0xD5, 0xB0, 0x5A, 0xA9, + 0x93, 0xB4, 0xEA, 0x98, 0x8D, 0x8F, 0xDD, 0xC1, 0x86, 0xFF, 0xB7, 0xDC, + 0x90, 0xA6, 0xC0, 0x8F, 0x4D, 0xF4, 0x35, 0xC9, 0x34, 0x02, 0x84, 0x92, + 0x36, 0xC3, 0xFA, 0xB4, 0xD2, 0x7C, 0x70, 0x26, 0xC1, 0xD4, 0xDC, 0xB2, + 0x60, 0x26, 0x46, 0xDE, 0xC9, 
0x75, 0x1E, 0x76, 0x3D, 0xBA, 0x37, 0xBD, + 0xF8, 0xFF, 0x94, 0x06, 0xAD, 0x9E, 0x53, 0x0E, 0xE5, 0xDB, 0x38, 0x2F, + 0x41, 0x30, 0x01, 0xAE, 0xB0, 0x6A, 0x53, 0xED, 0x90, 0x27, 0xD8, 0x31, + 0x17, 0x97, 0x27, 0xB0, 0x86, 0x5A, 0x89, 0x18, 0xDA, 0x3E, 0xDB, 0xEB, + 0xCF, 0x9B, 0x14, 0xED, 0x44, 0xCE, 0x6C, 0xBA, 0xCE, 0xD4, 0xBB, 0x1B, + 0xDB, 0x7F, 0x14, 0x47, 0xE6, 0xCC, 0x25, 0x4B, 0x33, 0x20, 0x51, 0x51, + 0x2B, 0xD7, 0xAF, 0x42, 0x6F, 0xB8, 0xF4, 0x01, 0x37, 0x8C, 0xD2, 0xBF, + 0x59, 0x83, 0xCA, 0x01, 0xC6, 0x4B, 0x92, 0xEC, 0xF0, 0x32, 0xEA, 0x15, + 0xD1, 0x72, 0x1D, 0x03, 0xF4, 0x82, 0xD7, 0xCE, 0x6E, 0x74, 0xFE, 0xF6, + 0xD5, 0x5E, 0x70, 0x2F, 0x46, 0x98, 0x0C, 0x82, 0xB5, 0xA8, 0x40, 0x31, + 0x90, 0x0B, 0x1C, 0x9E, 0x59, 0xE7, 0xC9, 0x7F, 0xBE, 0xC7, 0xE8, 0xF3, + 0x23, 0xA9, 0x7A, 0x7E, 0x36, 0xCC, 0x88, 0xBE, 0x0F, 0x1D, 0x45, 0xB7, + 0xFF, 0x58, 0x5A, 0xC5, 0x4B, 0xD4, 0x07, 0xB2, 0x2B, 0x41, 0x54, 0xAA, + 0xCC, 0x8F, 0x6D, 0x7E, 0xBF, 0x48, 0xE1, 0xD8, 0x14, 0xCC, 0x5E, 0xD2, + 0x0F, 0x80, 0x37, 0xE0, 0xA7, 0x97, 0x15, 0xEE, 0xF2, 0x9B, 0xE3, 0x28, + 0x06, 0xA1, 0xD5, 0x8B, 0xB7, 0xC5, 0xDA, 0x76, 0xF5, 0x50, 0xAA, 0x3D, + 0x8A, 0x1F, 0xBF, 0xF0, 0xEB, 0x19, 0xCC, 0xB1, 0xA3, 0x13, 0xD5, 0x5C, + 0xDA, 0x56, 0xC9, 0xEC, 0x2E, 0xF2, 0x96, 0x32, 0x38, 0x7F, 0xE8, 0xD7, + 0x6E, 0x3C, 0x04, 0x68, 0x04, 0x3E, 0x8F, 0x66, 0x3F, 0x48, 0x60, 0xEE, + 0x12, 0xBF, 0x2D, 0x5B, 0x0B, 0x74, 0x74, 0xD6, 0xE6, 0x94, 0xF9, 0x1E, + 0x6D, 0xBE, 0x11, 0x59, 0x74, 0xA3, 0x92, 0x6F, 0x12, 0xFE, 0xE5, 0xE4, + 0x38, 0x77, 0x7C, 0xB6, 0xA9, 0x32, 0xDF, 0x8C, 0xD8, 0xBE, 0xC4, 0xD0, + 0x73, 0xB9, 0x31, 0xBA, 0x3B, 0xC8, 0x32, 0xB6, 0x8D, 0x9D, 0xD3, 0x00, + 0x74, 0x1F, 0xA7, 0xBF, 0x8A, 0xFC, 0x47, 0xED, 0x25, 0x76, 0xF6, 0x93, + 0x6B, 0xA4, 0x24, 0x66, 0x3A, 0xAB, 0x63, 0x9C, 0x5A, 0xE4, 0xF5, 0x68, + 0x34, 0x23, 0xB4, 0x74, 0x2B, 0xF1, 0xC9, 0x78, 0x23, 0x8F, 0x16, 0xCB, + 0xE3, 0x9D, 0x65, 0x2D, 0xE3, 0xFD, 0xB8, 0xBE, 0xFC, 0x84, 0x8A, 0xD9, + 0x22, 0x22, 0x2E, 0x04, 0xA4, 
0x03, 0x7C, 0x07, 0x13, 0xEB, 0x57, 0xA8, + 0x1A, 0x23, 0xF0, 0xC7, 0x34, 0x73, 0xFC, 0x64, 0x6C, 0xEA, 0x30, 0x6B, + 0x4B, 0xCB, 0xC8, 0x86, 0x2F, 0x83, 0x85, 0xDD, 0xFA, 0x9D, 0x4B, 0x7F, + 0xA2, 0xC0, 0x87, 0xE8, 0x79, 0x68, 0x33, 0x03, 0xED, 0x5B, 0xDD, 0x3A, + 0x06, 0x2B, 0x3C, 0xF5, 0xB3, 0xA2, 0x78, 0xA6, 0x6D, 0x2A, 0x13, 0xF8, + 0x3F, 0x44, 0xF8, 0x2D, 0xDF, 0x31, 0x0E, 0xE0, 0x74, 0xAB, 0x6A, 0x36, + 0x45, 0x97, 0xE8, 0x99, 0xA0, 0x25, 0x5D, 0xC1, 0x64, 0xF3, 0x1C, 0xC5, + 0x08, 0x46, 0x85, 0x1D, 0xF9, 0xAB, 0x48, 0x19, 0x5D, 0xED, 0x7E, 0xA1, + 0xB1, 0xD5, 0x10, 0xBD, 0x7E, 0xE7, 0x4D, 0x73, 0xFA, 0xF3, 0x6B, 0xC3, + 0x1E, 0xCF, 0xA2, 0x68, 0x35, 0x90, 0x46, 0xF4, 0xEB, 0x87, 0x9F, 0x92, + 0x40, 0x09, 0x43, 0x8B, 0x48, 0x1C, 0x6C, 0xD7, 0x88, 0x9A, 0x00, 0x2E, + 0xD5, 0xEE, 0x38, 0x2B, 0xC9, 0x19, 0x0D, 0xA6, 0xFC, 0x02, 0x6E, 0x47, + 0x95, 0x58, 0xE4, 0x47, 0x56, 0x77, 0xE9, 0xAA, 0x9E, 0x30, 0x50, 0xE2, + 0x76, 0x56, 0x94, 0xDF, 0xC8, 0x1F, 0x56, 0xE8, 0x80, 0xB9, 0x6E, 0x71, + 0x60, 0xC9, 0x80, 0xDD, 0x98, 0xED, 0xD3, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF + }; + int ret; + libssh2_sha512_ctx exchange_hash_ctx; + + if(key_state->state == libssh2_NB_state_idle) { + key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value + (p_value) */ + key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ + + /* g == 2 */ + /* Initialize P and G */ + _libssh2_bn_set_word(key_state->g, 2); + _libssh2_bn_from_bin(key_state->p, 1024, p_value); + + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group18 Key Exchange"); + + key_state->state = libssh2_NB_state_created; + } + + ret = diffie_hellman_sha_algo(session, key_state->g, key_state->p, 1024, + 512, (void *)&exchange_hash_ctx, + SSH_MSG_KEXDH_INIT, SSH_MSG_KEXDH_REPLY, + NULL, 0, &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + key_state->state = libssh2_NB_state_idle; + _libssh2_bn_free(key_state->p); + key_state->p = 
NULL; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + + return ret; +} + +/* kex_method_diffie_hellman_group_exchange_sha1_key_exchange + * Diffie-Hellman Group Exchange Key Exchange using SHA1 + * Negotiates random(ish) group for secret derivation + */ +static int +kex_method_diffie_hellman_group_exchange_sha1_key_exchange +(LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state) +{ + int ret = 0; + int rc; + + if(key_state->state == libssh2_NB_state_idle) { + key_state->p = _libssh2_bn_init_from_bin(); + key_state->g = _libssh2_bn_init_from_bin(); + /* Ask for a P and G pair */ +#ifdef LIBSSH2_DH_GEX_NEW + key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST; + _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_MINGROUP); + _libssh2_htonu32(key_state->request + 5, LIBSSH2_DH_GEX_OPTGROUP); + _libssh2_htonu32(key_state->request + 9, LIBSSH2_DH_GEX_MAXGROUP); + key_state->request_len = 13; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group-Exchange " + "(New Method)"); +#else + key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST_OLD; + _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_OPTGROUP); + key_state->request_len = 5; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group-Exchange " + "(Old Method)"); +#endif + + key_state->state = libssh2_NB_state_created; + } + + if(key_state->state == libssh2_NB_state_created) { + rc = _libssh2_transport_send(session, key_state->request, + key_state->request_len, NULL, 0); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send Group Exchange Request"); + goto dh_gex_clean_exit; + } + + key_state->state = libssh2_NB_state_sent; + } + + if(key_state->state == libssh2_NB_state_sent) { + rc = _libssh2_packet_require(session, SSH_MSG_KEX_DH_GEX_GROUP, + &key_state->data, &key_state->data_len, + 0, NULL, 0, &key_state->req_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } 
+ else if(rc) { + ret = _libssh2_error(session, rc, + "Timeout waiting for GEX_GROUP reply"); + goto dh_gex_clean_exit; + } + + key_state->state = libssh2_NB_state_sent1; + } + + if(key_state->state == libssh2_NB_state_sent1) { + size_t p_len, g_len; + unsigned char *p, *g; + struct string_buf buf; + libssh2_sha1_ctx exchange_hash_ctx; + + if(key_state->data_len < 9) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); + goto dh_gex_clean_exit; + } + + buf.data = key_state->data; + buf.dataptr = buf.data; + buf.len = key_state->data_len; + + buf.dataptr++; /* increment to big num */ + + if(_libssh2_get_bignum_bytes(&buf, &p, &p_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected value"); + goto dh_gex_clean_exit; + } + + if(_libssh2_get_bignum_bytes(&buf, &g, &g_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected value"); + goto dh_gex_clean_exit; + } + + _libssh2_bn_from_bin(key_state->p, p_len, p); + _libssh2_bn_from_bin(key_state->g, g_len, g); + + ret = diffie_hellman_sha_algo(session, key_state->g, key_state->p, + p_len, 1, + (void *)&exchange_hash_ctx, + SSH_MSG_KEX_DH_GEX_INIT, + SSH_MSG_KEX_DH_GEX_REPLY, + key_state->data + 1, + key_state->data_len - 1, + &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + LIBSSH2_FREE(session, key_state->data); + } + + dh_gex_clean_exit: + key_state->state = libssh2_NB_state_idle; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + _libssh2_bn_free(key_state->p); + key_state->p = NULL; + + return ret; +} + + + +/* kex_method_diffie_hellman_group_exchange_sha256_key_exchange + * Diffie-Hellman Group Exchange Key Exchange using SHA256 + * Negotiates random(ish) group for secret derivation + */ +static int +kex_method_diffie_hellman_group_exchange_sha256_key_exchange +(LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state) +{ + int ret = 0; + int rc; + + if(key_state->state == libssh2_NB_state_idle) 
{ + key_state->p = _libssh2_bn_init(); + key_state->g = _libssh2_bn_init(); + /* Ask for a P and G pair */ +#ifdef LIBSSH2_DH_GEX_NEW + key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST; + _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_MINGROUP); + _libssh2_htonu32(key_state->request + 5, LIBSSH2_DH_GEX_OPTGROUP); + _libssh2_htonu32(key_state->request + 9, LIBSSH2_DH_GEX_MAXGROUP); + key_state->request_len = 13; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group-Exchange " + "(New Method SHA256)"); +#else + key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST_OLD; + _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_OPTGROUP); + key_state->request_len = 5; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating Diffie-Hellman Group-Exchange " + "(Old Method SHA256)"); +#endif + + key_state->state = libssh2_NB_state_created; + } + + if(key_state->state == libssh2_NB_state_created) { + rc = _libssh2_transport_send(session, key_state->request, + key_state->request_len, NULL, 0); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send " + "Group Exchange Request SHA256"); + goto dh_gex_clean_exit; + } + + key_state->state = libssh2_NB_state_sent; + } + + if(key_state->state == libssh2_NB_state_sent) { + rc = _libssh2_packet_require(session, SSH_MSG_KEX_DH_GEX_GROUP, + &key_state->data, &key_state->data_len, + 0, NULL, 0, &key_state->req_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Timeout waiting for GEX_GROUP reply SHA256"); + goto dh_gex_clean_exit; + } + + key_state->state = libssh2_NB_state_sent1; + } + + if(key_state->state == libssh2_NB_state_sent1) { + unsigned char *p, *g; + size_t p_len, g_len; + struct string_buf buf; + libssh2_sha256_ctx exchange_hash_ctx; + + if(key_state->data_len < 9) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); + goto 
dh_gex_clean_exit; + } + + buf.data = key_state->data; + buf.dataptr = buf.data; + buf.len = key_state->data_len; + + buf.dataptr++; /* increment to big num */ + + if(_libssh2_get_bignum_bytes(&buf, &p, &p_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected value"); + goto dh_gex_clean_exit; + } + + if(_libssh2_get_bignum_bytes(&buf, &g, &g_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected value"); + goto dh_gex_clean_exit; + } + + _libssh2_bn_from_bin(key_state->p, p_len, p); + _libssh2_bn_from_bin(key_state->g, g_len, g); + + ret = diffie_hellman_sha_algo(session, key_state->g, key_state->p, + p_len, 256, + (void *)&exchange_hash_ctx, + SSH_MSG_KEX_DH_GEX_INIT, + SSH_MSG_KEX_DH_GEX_REPLY, + key_state->data + 1, + key_state->data_len - 1, + &key_state->exchange_state); + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + LIBSSH2_FREE(session, key_state->data); + } + + dh_gex_clean_exit: + key_state->state = libssh2_NB_state_idle; + _libssh2_bn_free(key_state->g); + key_state->g = NULL; + _libssh2_bn_free(key_state->p); + key_state->p = NULL; + + return ret; +} + + +/* LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY + * + * Macro that create and verifies EC SHA hash with a given digest bytes + * + * Payload format: + * + * string V_C, client's identification string (CR and LF excluded) + * string V_S, server's identification string (CR and LF excluded) + * string I_C, payload of the client's SSH_MSG_KEXINIT + * string I_S, payload of the server's SSH_MSG_KEXINIT + * string K_S, server's public host key + * string Q_C, client's ephemeral public key octet string + * string Q_S, server's ephemeral public key octet string + * mpint K, shared secret + * + */ + +#define LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY(digest_type) \ +{ \ + libssh2_sha##digest_type##_ctx ctx; \ + exchange_state->exchange_hash = (void *)&ctx; \ + libssh2_sha##digest_type##_init(&ctx); \ + if(session->local.banner) { \ + 
_libssh2_htonu32(exchange_state->h_sig_comp, \ + strlen((char *) session->local.banner) - 2); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + (char *) session->local.banner, \ + strlen((char *) \ + session->local.banner) \ + - 2); \ + } \ + else { \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + LIBSSH2_SSH_DEFAULT_BANNER, \ + sizeof(LIBSSH2_SSH_DEFAULT_BANNER) \ + - 1); \ + } \ + \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + strlen((char *) session->remote.banner)); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + session->remote.banner, \ + strlen((char *) \ + session->remote.banner)); \ + \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + session->local.kexinit_len); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + session->local.kexinit, \ + session->local.kexinit_len); \ + \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + session->remote.kexinit_len); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + session->remote.kexinit, \ + session->remote.kexinit_len); \ + \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + session->server_hostkey_len); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + session->server_hostkey, \ + session->server_hostkey_len); \ + \ + _libssh2_htonu32(exchange_state->h_sig_comp, \ + public_key_len); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + public_key, \ + public_key_len); \ + \ + 
_libssh2_htonu32(exchange_state->h_sig_comp, \ + server_public_key_len); \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->h_sig_comp, 4); \ + libssh2_sha##digest_type##_update(ctx, \ + server_public_key, \ + server_public_key_len); \ + \ + libssh2_sha##digest_type##_update(ctx, \ + exchange_state->k_value, \ + exchange_state->k_value_len); \ + \ + libssh2_sha##digest_type##_final(ctx, exchange_state->h_sig_comp); \ + \ + if(session->hostkey-> \ + sig_verify(session, exchange_state->h_sig, \ + exchange_state->h_sig_len, exchange_state->h_sig_comp, \ + SHA##digest_type##_DIGEST_LENGTH, \ + &session->server_hostkey_abstract)) { \ + rc = -1; \ + } \ +} \ + + +#if LIBSSH2_ECDSA + +/* kex_session_ecdh_curve_type + * returns the EC curve type by name used in key exchange + */ + +static int +kex_session_ecdh_curve_type(const char *name, libssh2_curve_type *out_type) +{ + int ret = 0; + libssh2_curve_type type; + + if(name == NULL) + return -1; + + if(strcmp(name, "ecdh-sha2-nistp256") == 0) + type = LIBSSH2_EC_CURVE_NISTP256; + else if(strcmp(name, "ecdh-sha2-nistp384") == 0) + type = LIBSSH2_EC_CURVE_NISTP384; + else if(strcmp(name, "ecdh-sha2-nistp521") == 0) + type = LIBSSH2_EC_CURVE_NISTP521; + else { + ret = -1; + } + + if(ret == 0 && out_type) { + *out_type = type; + } + + return ret; +} + + +/* ecdh_sha2_nistp + * Elliptic Curve Diffie Hellman Key Exchange + */ + +static int ecdh_sha2_nistp(LIBSSH2_SESSION *session, libssh2_curve_type type, + unsigned char *data, size_t data_len, + unsigned char *public_key, + size_t public_key_len, _libssh2_ec_key *private_key, + kmdhgGPshakex_state_t *exchange_state) +{ + int ret = 0; + int rc; + + if(data_len < 5) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Host key data is too short"); + return ret; + } + + if(exchange_state->state == libssh2_NB_state_idle) { + + /* Setup initial values */ + exchange_state->k = _libssh2_bn_init(); + + exchange_state->state = libssh2_NB_state_created; + } + + 
if(exchange_state->state == libssh2_NB_state_created) { + /* parse INIT reply data */ + + /* host key K_S */ + unsigned char *server_public_key; + size_t server_public_key_len; + struct string_buf buf; + + buf.data = data; + buf.len = data_len; + buf.dataptr = buf.data; + buf.dataptr++; /* Advance past packet type */ + + if(_libssh2_copy_string(session, &buf, &(session->server_hostkey), + &server_public_key_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for a copy " + "of the host key"); + goto clean_exit; + } + + session->server_hostkey_len = (uint32_t)server_public_key_len; + +#if LIBSSH2_MD5 + { + libssh2_md5_ctx fingerprint_ctx; + + if(libssh2_md5_init(&fingerprint_ctx)) { + libssh2_md5_update(fingerprint_ctx, session->server_hostkey, + session->server_hostkey_len); + libssh2_md5_final(fingerprint_ctx, + session->server_hostkey_md5); + session->server_hostkey_md5_valid = TRUE; + } + else { + session->server_hostkey_md5_valid = FALSE; + } + } +#ifdef LIBSSH2DEBUG + { + char fingerprint[50], *fprint = fingerprint; + int i; + for(i = 0; i < 16; i++, fprint += 3) { + snprintf(fprint, 4, "%02x:", session->server_hostkey_md5[i]); + } + *(--fprint) = '\0'; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server's MD5 Fingerprint: %s", fingerprint); + } +#endif /* LIBSSH2DEBUG */ +#endif /* ! 
LIBSSH2_MD5 */ + + { + libssh2_sha1_ctx fingerprint_ctx; + + if(libssh2_sha1_init(&fingerprint_ctx)) { + libssh2_sha1_update(fingerprint_ctx, session->server_hostkey, + session->server_hostkey_len); + libssh2_sha1_final(fingerprint_ctx, + session->server_hostkey_sha1); + session->server_hostkey_sha1_valid = TRUE; + } + else { + session->server_hostkey_sha1_valid = FALSE; + } + } +#ifdef LIBSSH2DEBUG + { + char fingerprint[64], *fprint = fingerprint; + int i; + + for(i = 0; i < 20; i++, fprint += 3) { + snprintf(fprint, 4, "%02x:", session->server_hostkey_sha1[i]); + } + *(--fprint) = '\0'; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server's SHA1 Fingerprint: %s", fingerprint); + } +#endif /* LIBSSH2DEBUG */ + + /* SHA256 */ + { + libssh2_sha256_ctx fingerprint_ctx; + + if(libssh2_sha256_init(&fingerprint_ctx)) { + libssh2_sha256_update(fingerprint_ctx, session->server_hostkey, + session->server_hostkey_len); + libssh2_sha256_final(fingerprint_ctx, + session->server_hostkey_sha256); + session->server_hostkey_sha256_valid = TRUE; + } + else { + session->server_hostkey_sha256_valid = FALSE; + } + } +#ifdef LIBSSH2DEBUG + { + char *base64Fingerprint = NULL; + _libssh2_base64_encode(session, + (const char *) + session->server_hostkey_sha256, + SHA256_DIGEST_LENGTH, &base64Fingerprint); + if(base64Fingerprint != NULL) { + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server's SHA256 Fingerprint: %s", + base64Fingerprint); + LIBSSH2_FREE(session, base64Fingerprint); + } + } +#endif /* LIBSSH2DEBUG */ + + if(session->hostkey->init(session, session->server_hostkey, + session->server_hostkey_len, + &session->server_hostkey_abstract)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Unable to initialize hostkey importer"); + goto clean_exit; + } + + /* server public key Q_S */ + if(_libssh2_get_string(&buf, &server_public_key, + &server_public_key_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); + goto clean_exit; + 
} + + /* server signature */ + if(_libssh2_get_string(&buf, &exchange_state->h_sig, + &(exchange_state->h_sig_len))) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Unexpected ecdh server sig length"); + goto clean_exit; + } + + /* Compute the shared secret K */ + rc = _libssh2_ecdh_gen_k(&exchange_state->k, private_key, + server_public_key, server_public_key_len); + if(rc != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_KEX_FAILURE, + "Unable to create ECDH shared secret"); + goto clean_exit; + } + + exchange_state->k_value_len = _libssh2_bn_bytes(exchange_state->k) + 5; + if(_libssh2_bn_bits(exchange_state->k) % 8) { + /* don't need leading 00 */ + exchange_state->k_value_len--; + } + exchange_state->k_value = + LIBSSH2_ALLOC(session, exchange_state->k_value_len); + if(!exchange_state->k_value) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate buffer for K"); + goto clean_exit; + } + _libssh2_htonu32(exchange_state->k_value, + exchange_state->k_value_len - 4); + if(_libssh2_bn_bits(exchange_state->k) % 8) { + _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 4); + } + else { + exchange_state->k_value[4] = 0; + _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 5); + } + + /* verify hash */ + + switch(type) { + case LIBSSH2_EC_CURVE_NISTP256: + LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY(256); + break; + + case LIBSSH2_EC_CURVE_NISTP384: + LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY(384); + break; + case LIBSSH2_EC_CURVE_NISTP521: + LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY(512); + break; + } + + if(rc != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_SIGN, + "Unable to verify hostkey signature"); + goto clean_exit; + } + + exchange_state->c = SSH_MSG_NEWKEYS; + exchange_state->state = libssh2_NB_state_sent; + } + + if(exchange_state->state == libssh2_NB_state_sent) { + rc = _libssh2_transport_send(session, &exchange_state->c, 1, NULL, 0); + if(rc == LIBSSH2_ERROR_EAGAIN) 
{ + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send NEWKEYS message"); + goto clean_exit; + } + + exchange_state->state = libssh2_NB_state_sent2; + } + + if(exchange_state->state == libssh2_NB_state_sent2) { + rc = _libssh2_packet_require(session, SSH_MSG_NEWKEYS, + &exchange_state->tmp, + &exchange_state->tmp_len, 0, NULL, 0, + &exchange_state->req_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, "Timed out waiting for NEWKEYS"); + goto clean_exit; + } + + /* The first key exchange has been performed, + switch to active crypt/comp/mac mode */ + session->state |= LIBSSH2_STATE_NEWKEYS; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Received NEWKEYS message"); + + /* This will actually end up being just packet_type(1) + for this packet type anyway */ + LIBSSH2_FREE(session, exchange_state->tmp); + + if(!session->session_id) { + + size_t digest_length = 0; + + if(type == LIBSSH2_EC_CURVE_NISTP256) + digest_length = SHA256_DIGEST_LENGTH; + else if(type == LIBSSH2_EC_CURVE_NISTP384) + digest_length = SHA384_DIGEST_LENGTH; + else if(type == LIBSSH2_EC_CURVE_NISTP521) + digest_length = SHA512_DIGEST_LENGTH; + else{ + ret = _libssh2_error(session, LIBSSH2_ERROR_KEX_FAILURE, + "Unknown SHA digest for EC curve"); + goto clean_exit; + + } + session->session_id = LIBSSH2_ALLOC(session, digest_length); + if(!session->session_id) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate buffer for " + "SHA digest"); + goto clean_exit; + } + memcpy(session->session_id, exchange_state->h_sig_comp, + digest_length); + session->session_id_len = digest_length; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "session_id calculated"); + } + + /* Cleanup any existing cipher */ + if(session->local.crypt->dtor) { + session->local.crypt->dtor(session, + &session->local.crypt_abstract); + } + + /* Calculate IV/Secret/Key for each direction */ + if(session->local.crypt->init) 
{ + unsigned char *iv = NULL, *secret = NULL; + int free_iv = 0, free_secret = 0; + + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(iv, + session->local.crypt-> + iv_len, "A"); + if(!iv) { + ret = -1; + goto clean_exit; + } + + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(secret, + session->local.crypt-> + secret_len, "C"); + + if(!secret) { + LIBSSH2_FREE(session, iv); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + if(session->local.crypt-> + init(session, session->local.crypt, iv, &free_iv, secret, + &free_secret, 1, &session->local.crypt_abstract)) { + LIBSSH2_FREE(session, iv); + LIBSSH2_FREE(session, secret); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + + if(free_iv) { + _libssh2_explicit_zero(iv, session->local.crypt->iv_len); + LIBSSH2_FREE(session, iv); + } + + if(free_secret) { + _libssh2_explicit_zero(secret, + session->local.crypt->secret_len); + LIBSSH2_FREE(session, secret); + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Client to Server IV and Key calculated"); + + if(session->remote.crypt->dtor) { + /* Cleanup any existing cipher */ + session->remote.crypt->dtor(session, + &session->remote.crypt_abstract); + } + + if(session->remote.crypt->init) { + unsigned char *iv = NULL, *secret = NULL; + int free_iv = 0, free_secret = 0; + + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(iv, + session->remote.crypt-> + iv_len, "B"); + + if(!iv) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(secret, + session->remote.crypt-> + secret_len, "D"); + + if(!secret) { + LIBSSH2_FREE(session, iv); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + if(session->remote.crypt-> + init(session, session->remote.crypt, iv, &free_iv, secret, + &free_secret, 0, &session->remote.crypt_abstract)) { + LIBSSH2_FREE(session, iv); + LIBSSH2_FREE(session, secret); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + + if(free_iv) { + _libssh2_explicit_zero(iv, session->remote.crypt->iv_len); + 
LIBSSH2_FREE(session, iv); + } + + if(free_secret) { + _libssh2_explicit_zero(secret, + session->remote.crypt->secret_len); + LIBSSH2_FREE(session, secret); + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server to Client IV and Key calculated"); + + if(session->local.mac->dtor) { + session->local.mac->dtor(session, &session->local.mac_abstract); + } + + if(session->local.mac->init) { + unsigned char *key = NULL; + int free_key = 0; + + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(key, + session->local.mac-> + key_len, "E"); + + if(!key) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + session->local.mac->init(session, key, &free_key, + &session->local.mac_abstract); + + if(free_key) { + _libssh2_explicit_zero(key, session->local.mac->key_len); + LIBSSH2_FREE(session, key); + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Client to Server HMAC Key calculated"); + + if(session->remote.mac->dtor) { + session->remote.mac->dtor(session, &session->remote.mac_abstract); + } + + if(session->remote.mac->init) { + unsigned char *key = NULL; + int free_key = 0; + + LIBSSH2_KEX_METHOD_EC_SHA_VALUE_HASH(key, + session->remote.mac-> + key_len, "F"); + + if(!key) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + session->remote.mac->init(session, key, &free_key, + &session->remote.mac_abstract); + + if(free_key) { + _libssh2_explicit_zero(key, session->remote.mac->key_len); + LIBSSH2_FREE(session, key); + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server to Client HMAC Key calculated"); + + /* Initialize compression for each direction */ + + /* Cleanup any existing compression */ + if(session->local.comp && session->local.comp->dtor) { + session->local.comp->dtor(session, 1, + &session->local.comp_abstract); + } + + if(session->local.comp && session->local.comp->init) { + if(session->local.comp->init(session, 1, + &session->local.comp_abstract)) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + } + _libssh2_debug(session, 
LIBSSH2_TRACE_KEX, + "Client to Server compression initialized"); + + if(session->remote.comp && session->remote.comp->dtor) { + session->remote.comp->dtor(session, 0, + &session->remote.comp_abstract); + } + + if(session->remote.comp && session->remote.comp->init) { + if(session->remote.comp->init(session, 0, + &session->remote.comp_abstract)) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server to Client compression initialized"); + + } + +clean_exit: + _libssh2_bn_free(exchange_state->k); + exchange_state->k = NULL; + + if(exchange_state->k_value) { + LIBSSH2_FREE(session, exchange_state->k_value); + exchange_state->k_value = NULL; + } + + exchange_state->state = libssh2_NB_state_idle; + + return ret; +} + +/* kex_method_ecdh_key_exchange + * + * Elliptic Curve Diffie Hellman Key Exchange + * supports SHA256/384/512 hashes based on negotated ecdh method + * + */ + +static int +kex_method_ecdh_key_exchange +(LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state) +{ + int ret = 0; + int rc = 0; + unsigned char *s; + libssh2_curve_type type; + + if(key_state->state == libssh2_NB_state_idle) { + + key_state->public_key_oct = NULL; + key_state->state = libssh2_NB_state_created; + } + + if(key_state->state == libssh2_NB_state_created) { + rc = kex_session_ecdh_curve_type(session->kex->name, &type); + + if(rc != 0) { + ret = _libssh2_error(session, -1, + "Unknown KEX nistp curve type"); + goto ecdh_clean_exit; + } + + rc = _libssh2_ecdsa_create_key(session, &key_state->private_key, + &key_state->public_key_oct, + &key_state->public_key_oct_len, type); + + if(rc != 0) { + ret = _libssh2_error(session, rc, + "Unable to create private key"); + goto ecdh_clean_exit; + } + + key_state->request[0] = SSH2_MSG_KEX_ECDH_INIT; + s = key_state->request + 1; + _libssh2_store_str(&s, (const char *)key_state->public_key_oct, + key_state->public_key_oct_len); + key_state->request_len = 
key_state->public_key_oct_len + 5; + + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating ECDH SHA2 NISTP256"); + + key_state->state = libssh2_NB_state_sent; + } + + if(key_state->state == libssh2_NB_state_sent) { + rc = _libssh2_transport_send(session, key_state->request, + key_state->request_len, NULL, 0); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send ECDH_INIT"); + goto ecdh_clean_exit; + } + + key_state->state = libssh2_NB_state_sent1; + } + + if(key_state->state == libssh2_NB_state_sent1) { + rc = _libssh2_packet_require(session, SSH2_MSG_KEX_ECDH_REPLY, + &key_state->data, &key_state->data_len, + 0, NULL, 0, &key_state->req_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return rc; + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Timeout waiting for ECDH_REPLY reply"); + goto ecdh_clean_exit; + } + + key_state->state = libssh2_NB_state_sent2; + } + + if(key_state->state == libssh2_NB_state_sent2) { + + (void)kex_session_ecdh_curve_type(session->kex->name, &type); + + ret = ecdh_sha2_nistp(session, type, key_state->data, + key_state->data_len, + (unsigned char *)key_state->public_key_oct, + key_state->public_key_oct_len, + key_state->private_key, + &key_state->exchange_state); + + if(ret == LIBSSH2_ERROR_EAGAIN) { + return ret; + } + + LIBSSH2_FREE(session, key_state->data); + } + +ecdh_clean_exit: + + if(key_state->public_key_oct) { + LIBSSH2_FREE(session, key_state->public_key_oct); + key_state->public_key_oct = NULL; + } + + if(key_state->private_key) { + _libssh2_ecdsa_free(key_state->private_key); + key_state->private_key = NULL; + } + + key_state->state = libssh2_NB_state_idle; + + return ret; +} + +#endif /*LIBSSH2_ECDSA*/ + + +#if LIBSSH2_ED25519 + +/* curve25519_sha256 + * Elliptic Curve Key Exchange + */ + +static int +curve25519_sha256(LIBSSH2_SESSION *session, unsigned char *data, + size_t data_len, + unsigned char public_key[LIBSSH2_ED25519_KEY_LEN], + 
unsigned char private_key[LIBSSH2_ED25519_KEY_LEN], + kmdhgGPshakex_state_t *exchange_state) +{ + int ret = 0; + int rc; + int public_key_len = LIBSSH2_ED25519_KEY_LEN; + + if(data_len < 5) { + return _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Data is too short"); + } + + if(exchange_state->state == libssh2_NB_state_idle) { + + /* Setup initial values */ + exchange_state->k = _libssh2_bn_init(); + + exchange_state->state = libssh2_NB_state_created; + } + + if(exchange_state->state == libssh2_NB_state_created) { + /* parse INIT reply data */ + unsigned char *server_public_key, *server_host_key; + size_t server_public_key_len, hostkey_len; + struct string_buf buf; + + if(data_len < 5) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); + goto clean_exit; + } + + buf.data = data; + buf.len = data_len; + buf.dataptr = buf.data; + buf.dataptr++; /* advance past packet type */ + + if(_libssh2_get_string(&buf, &server_host_key, &hostkey_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); + goto clean_exit; + } + + session->server_hostkey_len = (uint32_t)hostkey_len; + session->server_hostkey = LIBSSH2_ALLOC(session, + session->server_hostkey_len); + if(!session->server_hostkey) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for a copy " "of the host key"); goto clean_exit; } - memcpy(session->server_hostkey, exchange_state->s, + + memcpy(session->server_hostkey, server_host_key, session->server_hostkey_len); - exchange_state->s += session->server_hostkey_len; #if LIBSSH2_MD5 { libssh2_md5_ctx fingerprint_ctx; - if (libssh2_md5_init(&fingerprint_ctx)) { + if(libssh2_md5_init(&fingerprint_ctx)) { libssh2_md5_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_md5_final(fingerprint_ctx, @@ -892,7 +2421,7 @@ static int diffie_hellman_sha256(LIBSSH2_SESSION *session, } *(--fprint) = '\0'; _libssh2_debug(session, 
LIBSSH2_TRACE_KEX, - "Server's MD5 Fingerprint: %s", fingerprint); + "Server's MD5 Fingerprint: %s", fingerprint); } #endif /* LIBSSH2DEBUG */ #endif /* ! LIBSSH2_MD5 */ @@ -900,7 +2429,7 @@ static int diffie_hellman_sha256(LIBSSH2_SESSION *session, { libssh2_sha1_ctx fingerprint_ctx; - if (libssh2_sha1_init(&fingerprint_ctx)) { + if(libssh2_sha1_init(&fingerprint_ctx)) { libssh2_sha1_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_sha1_final(fingerprint_ctx, @@ -921,11 +2450,42 @@ static int diffie_hellman_sha256(LIBSSH2_SESSION *session, } *(--fprint) = '\0'; _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Server's SHA1 Fingerprint: %s", fingerprint); + "Server's SHA1 Fingerprint: %s", fingerprint); + } +#endif /* LIBSSH2DEBUG */ + + /* SHA256 */ + { + libssh2_sha256_ctx fingerprint_ctx; + + if(libssh2_sha256_init(&fingerprint_ctx)) { + libssh2_sha256_update(fingerprint_ctx, session->server_hostkey, + session->server_hostkey_len); + libssh2_sha256_final(fingerprint_ctx, + session->server_hostkey_sha256); + session->server_hostkey_sha256_valid = TRUE; + } + else { + session->server_hostkey_sha256_valid = FALSE; + } + } +#ifdef LIBSSH2DEBUG + { + char *base64Fingerprint = NULL; + _libssh2_base64_encode(session, + (const char *) + session->server_hostkey_sha256, + SHA256_DIGEST_LENGTH, &base64Fingerprint); + if(base64Fingerprint != NULL) { + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server's SHA256 Fingerprint: %s", + base64Fingerprint); + LIBSSH2_FREE(session, base64Fingerprint); + } } #endif /* LIBSSH2DEBUG */ - if (session->hostkey->init(session, session->server_hostkey, + if(session->hostkey->init(session, session->server_hostkey, session->server_hostkey_len, &session->server_hostkey_abstract)) { ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, @@ -933,751 +2493,454 @@ static int diffie_hellman_sha256(LIBSSH2_SESSION *session, goto clean_exit; } - exchange_state->f_value_len = _libssh2_ntohu32(exchange_state->s); 
- exchange_state->s += 4; - exchange_state->f_value = exchange_state->s; - exchange_state->s += exchange_state->f_value_len; - _libssh2_bn_from_bin(exchange_state->f, exchange_state->f_value_len, - exchange_state->f_value); - - exchange_state->h_sig_len = _libssh2_ntohu32(exchange_state->s); - exchange_state->s += 4; - exchange_state->h_sig = exchange_state->s; - - /* Compute the shared secret */ - _libssh2_bn_mod_exp(exchange_state->k, exchange_state->f, - exchange_state->x, p, exchange_state->ctx); - exchange_state->k_value_len = _libssh2_bn_bytes(exchange_state->k) + 5; - if (_libssh2_bn_bits(exchange_state->k) % 8) { - /* don't need leading 00 */ - exchange_state->k_value_len--; - } - exchange_state->k_value = - LIBSSH2_ALLOC(session, exchange_state->k_value_len); - if (!exchange_state->k_value) { - ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate buffer for K"); + /* server public key Q_S */ + if(_libssh2_get_string(&buf, &server_public_key, + &server_public_key_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected key length"); goto clean_exit; } - _libssh2_htonu32(exchange_state->k_value, - exchange_state->k_value_len - 4); - if (_libssh2_bn_bits(exchange_state->k) % 8) { - _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 4); - } else { - exchange_state->k_value[4] = 0; - _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 5); - } - - exchange_state->exchange_hash = (void*)&exchange_hash_ctx; - libssh2_sha256_init(&exchange_hash_ctx); - - if (session->local.banner) { - _libssh2_htonu32(exchange_state->h_sig_comp, - strlen((char *) session->local.banner) - 2); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - (char *) session->local.banner, - strlen((char *) session->local.banner) - 2); - } else { - _libssh2_htonu32(exchange_state->h_sig_comp, - sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); - 
libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - LIBSSH2_SSH_DEFAULT_BANNER, - sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); - } - - _libssh2_htonu32(exchange_state->h_sig_comp, - strlen((char *) session->remote.banner)); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - session->remote.banner, - strlen((char *) session->remote.banner)); - - _libssh2_htonu32(exchange_state->h_sig_comp, - session->local.kexinit_len); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - session->local.kexinit, - session->local.kexinit_len); - - _libssh2_htonu32(exchange_state->h_sig_comp, - session->remote.kexinit_len); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - session->remote.kexinit, - session->remote.kexinit_len); - - _libssh2_htonu32(exchange_state->h_sig_comp, - session->server_hostkey_len); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - session->server_hostkey, - session->server_hostkey_len); - if (packet_type_init == SSH_MSG_KEX_DH_GEX_INIT) { - /* diffie-hellman-group-exchange hashes additional fields */ -#ifdef LIBSSH2_DH_GEX_NEW - _libssh2_htonu32(exchange_state->h_sig_comp, - LIBSSH2_DH_GEX_MINGROUP); - _libssh2_htonu32(exchange_state->h_sig_comp + 4, - LIBSSH2_DH_GEX_OPTGROUP); - _libssh2_htonu32(exchange_state->h_sig_comp + 8, - LIBSSH2_DH_GEX_MAXGROUP); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 12); -#else - _libssh2_htonu32(exchange_state->h_sig_comp, - LIBSSH2_DH_GEX_OPTGROUP); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); -#endif + if(server_public_key_len != LIBSSH2_ED25519_KEY_LEN) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + 
"Unexpected curve25519 server " + "public key length"); + goto clean_exit; } - if (midhash) { - libssh2_sha256_update(exchange_hash_ctx, midhash, - midhash_len); + /* server signature */ + if(_libssh2_get_string(&buf, &exchange_state->h_sig, + &(exchange_state->h_sig_len))) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, + "Unexpected curve25519 server sig length"); + goto clean_exit; } - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->e_packet + 1, - exchange_state->e_packet_len - 1); + /* Compute the shared secret K */ + rc = _libssh2_curve25519_gen_k(&exchange_state->k, private_key, + server_public_key); + if(rc != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_KEX_FAILURE, + "Unable to create ECDH shared secret"); + goto clean_exit; + } - _libssh2_htonu32(exchange_state->h_sig_comp, - exchange_state->f_value_len); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->h_sig_comp, 4); - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->f_value, - exchange_state->f_value_len); - - libssh2_sha256_update(exchange_hash_ctx, - exchange_state->k_value, - exchange_state->k_value_len); - - libssh2_sha256_final(exchange_hash_ctx, - exchange_state->h_sig_comp); - - if (session->hostkey-> - sig_verify(session, exchange_state->h_sig, - exchange_state->h_sig_len, exchange_state->h_sig_comp, - SHA256_DIGEST_LENGTH, &session->server_hostkey_abstract)) { - ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_SIGN, - "Unable to verify hostkey signature"); + exchange_state->k_value_len = _libssh2_bn_bytes(exchange_state->k) + 5; + if(_libssh2_bn_bits(exchange_state->k) % 8) { + /* don't need leading 00 */ + exchange_state->k_value_len--; + } + exchange_state->k_value = + LIBSSH2_ALLOC(session, exchange_state->k_value_len); + if(!exchange_state->k_value) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate buffer for K"); goto clean_exit; } + _libssh2_htonu32(exchange_state->k_value, + exchange_state->k_value_len - 
4); + if(_libssh2_bn_bits(exchange_state->k) % 8) { + _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 4); + } + else { + exchange_state->k_value[4] = 0; + _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 5); + } + /*/ verify hash */ + LIBSSH2_KEX_METHOD_EC_SHA_HASH_CREATE_VERIFY(256); + if(rc != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_SIGN, + "Unable to verify hostkey signature"); + goto clean_exit; + } - _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Sending NEWKEYS message"); exchange_state->c = SSH_MSG_NEWKEYS; - - exchange_state->state = libssh2_NB_state_sent2; + exchange_state->state = libssh2_NB_state_sent; } - if (exchange_state->state == libssh2_NB_state_sent2) { + if(exchange_state->state == libssh2_NB_state_sent) { rc = _libssh2_transport_send(session, &exchange_state->c, 1, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { - ret = _libssh2_error(session, rc, "Unable to send NEWKEYS message"); + } + else if(rc) { + ret = _libssh2_error(session, rc, + "Unable to send NEWKEYS message"); goto clean_exit; } - exchange_state->state = libssh2_NB_state_sent3; + exchange_state->state = libssh2_NB_state_sent2; } - if (exchange_state->state == libssh2_NB_state_sent3) { + if(exchange_state->state == libssh2_NB_state_sent2) { rc = _libssh2_packet_require(session, SSH_MSG_NEWKEYS, &exchange_state->tmp, &exchange_state->tmp_len, 0, NULL, 0, &exchange_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { ret = _libssh2_error(session, rc, "Timed out waiting for NEWKEYS"); goto clean_exit; } - /* The first key exchange has been performed, - switch to active crypt/comp/mac mode */ + + /* The first key exchange has been performed, switch to active + crypt/comp/mac mode */ + session->state |= LIBSSH2_STATE_NEWKEYS; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Received NEWKEYS 
message"); - /* This will actually end up being just packet_type(1) - for this packet type anyway */ + /* This will actually end up being just packet_type(1) for this packet + type anyway */ LIBSSH2_FREE(session, exchange_state->tmp); - if (!session->session_id) { - session->session_id = LIBSSH2_ALLOC(session, SHA256_DIGEST_LENGTH); - if (!session->session_id) { + if(!session->session_id) { + + size_t digest_length = SHA256_DIGEST_LENGTH; + session->session_id = LIBSSH2_ALLOC(session, digest_length); + if(!session->session_id) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate buffer for SHA digest"); + "Unable to allxcocate buffer for " + "SHA digest"); goto clean_exit; } memcpy(session->session_id, exchange_state->h_sig_comp, - SHA256_DIGEST_LENGTH); - session->session_id_len = SHA256_DIGEST_LENGTH; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, "session_id calculated"); + digest_length); + session->session_id_len = digest_length; + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "session_id calculated"); } /* Cleanup any existing cipher */ - if (session->local.crypt->dtor) { + if(session->local.crypt->dtor) { session->local.crypt->dtor(session, - &session->local.crypt_abstract); + &session->local.crypt_abstract); } /* Calculate IV/Secret/Key for each direction */ - if (session->local.crypt->init) { + if(session->local.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(iv, - session->local.crypt-> - iv_len, "A"); - if (!iv) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, iv, + session->local.crypt-> + iv_len, "A"); + if(!iv) { ret = -1; goto clean_exit; } - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(secret, - session->local.crypt-> - secret_len, "C"); - if (!secret) { + + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, secret, + session->local.crypt-> + secret_len, "C"); + + if(!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - 
if (session->local.crypt-> + if(session->local.crypt-> init(session, session->local.crypt, iv, &free_iv, secret, &free_secret, 1, &session->local.crypt_abstract)) { - LIBSSH2_FREE(session, iv); - LIBSSH2_FREE(session, secret); - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } + LIBSSH2_FREE(session, iv); + LIBSSH2_FREE(session, secret); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } - if (free_iv) { - memset(iv, 0, session->local.crypt->iv_len); + if(free_iv) { + _libssh2_explicit_zero(iv, session->local.crypt->iv_len); LIBSSH2_FREE(session, iv); } - if (free_secret) { - memset(secret, 0, session->local.crypt->secret_len); + if(free_secret) { + _libssh2_explicit_zero(secret, + session->local.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Client to Server IV and Key calculated"); + "Client to Server IV and Key calculated"); - if (session->remote.crypt->dtor) { + if(session->remote.crypt->dtor) { /* Cleanup any existing cipher */ session->remote.crypt->dtor(session, &session->remote.crypt_abstract); } - if (session->remote.crypt->init) { + if(session->remote.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(iv, - session->remote.crypt-> - iv_len, "B"); - if (!iv) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, iv, + session->remote.crypt-> + iv_len, "B"); + + if(!iv) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(secret, - session->remote.crypt-> - secret_len, "D"); - if (!secret) { + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, secret, + session->remote.crypt-> + secret_len, "D"); + + if(!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } - if (session->remote.crypt-> + if(session->remote.crypt-> init(session, session->remote.crypt, iv, &free_iv, secret, &free_secret, 0, &session->remote.crypt_abstract)) { - 
LIBSSH2_FREE(session, iv); - LIBSSH2_FREE(session, secret); - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } + LIBSSH2_FREE(session, iv); + LIBSSH2_FREE(session, secret); + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } - if (free_iv) { - memset(iv, 0, session->remote.crypt->iv_len); + if(free_iv) { + _libssh2_explicit_zero(iv, session->remote.crypt->iv_len); LIBSSH2_FREE(session, iv); } - if (free_secret) { - memset(secret, 0, session->remote.crypt->secret_len); + if(free_secret) { + _libssh2_explicit_zero(secret, + session->remote.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Server to Client IV and Key calculated"); + "Server to Client IV and Key calculated"); - if (session->local.mac->dtor) { + if(session->local.mac->dtor) { session->local.mac->dtor(session, &session->local.mac_abstract); } - if (session->local.mac->init) { - unsigned char *key = NULL; - int free_key = 0; - - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(key, - session->local.mac-> - key_len, "E"); - if (!key) { - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } - session->local.mac->init(session, key, &free_key, - &session->local.mac_abstract); - - if (free_key) { - memset(key, 0, session->local.mac->key_len); - LIBSSH2_FREE(session, key); - } - } - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Client to Server HMAC Key calculated"); - - if (session->remote.mac->dtor) { - session->remote.mac->dtor(session, &session->remote.mac_abstract); - } - - if (session->remote.mac->init) { + if(session->local.mac->init) { unsigned char *key = NULL; int free_key = 0; - LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA256_HASH(key, - session->remote.mac-> - key_len, "F"); - if (!key) { - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } - session->remote.mac->init(session, key, &free_key, - &session->remote.mac_abstract); - - if (free_key) { - memset(key, 0, session->remote.mac->key_len); - LIBSSH2_FREE(session, key); - } - } - 
_libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Server to Client HMAC Key calculated"); - - /* Initialize compression for each direction */ - - /* Cleanup any existing compression */ - if (session->local.comp && session->local.comp->dtor) { - session->local.comp->dtor(session, 1, - &session->local.comp_abstract); - } - - if (session->local.comp && session->local.comp->init) { - if (session->local.comp->init(session, 1, - &session->local.comp_abstract)) { - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } - } - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Client to Server compression initialized"); - - if (session->remote.comp && session->remote.comp->dtor) { - session->remote.comp->dtor(session, 0, - &session->remote.comp_abstract); - } - - if (session->remote.comp && session->remote.comp->init) { - if (session->remote.comp->init(session, 0, - &session->remote.comp_abstract)) { - ret = LIBSSH2_ERROR_KEX_FAILURE; - goto clean_exit; - } - } - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Server to Client compression initialized"); - - } - - clean_exit: - _libssh2_bn_free(exchange_state->x); - exchange_state->x = NULL; - _libssh2_bn_free(exchange_state->e); - exchange_state->e = NULL; - _libssh2_bn_free(exchange_state->f); - exchange_state->f = NULL; - _libssh2_bn_free(exchange_state->k); - exchange_state->k = NULL; - _libssh2_bn_ctx_free(exchange_state->ctx); - exchange_state->ctx = NULL; - - if (exchange_state->e_packet) { - LIBSSH2_FREE(session, exchange_state->e_packet); - exchange_state->e_packet = NULL; - } - - if (exchange_state->s_packet) { - LIBSSH2_FREE(session, exchange_state->s_packet); - exchange_state->s_packet = NULL; - } - - if (exchange_state->k_value) { - LIBSSH2_FREE(session, exchange_state->k_value); - exchange_state->k_value = NULL; - } - - exchange_state->state = libssh2_NB_state_idle; - - return ret; -} - - - -/* kex_method_diffie_hellman_group1_sha1_key_exchange - * Diffie-Hellman Group1 (Actually Group2) Key Exchange using SHA1 - */ 
-static int -kex_method_diffie_hellman_group1_sha1_key_exchange(LIBSSH2_SESSION *session, - key_exchange_state_low_t - * key_state) -{ - static const unsigned char p_value[128] = { - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34, - 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, - 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, - 0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22, - 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, - 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, - 0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37, - 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, - 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, - 0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B, - 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, - 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, - 0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6, - 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE6, 0x53, 0x81, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF - }; - - int ret; - - if (key_state->state == libssh2_NB_state_idle) { - /* g == 2 */ - key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value (p_value) */ - key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ - - /* Initialize P and G */ - _libssh2_bn_set_word(key_state->g, 2); - _libssh2_bn_from_bin(key_state->p, 128, p_value); - - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group1 Key Exchange"); - - key_state->state = libssh2_NB_state_created; - } - ret = diffie_hellman_sha1(session, key_state->g, key_state->p, 128, - SSH_MSG_KEXDH_INIT, SSH_MSG_KEXDH_REPLY, - NULL, 0, &key_state->exchange_state); - if (ret == LIBSSH2_ERROR_EAGAIN) { - return ret; - } - - _libssh2_bn_free(key_state->p); - key_state->p = NULL; - _libssh2_bn_free(key_state->g); - key_state->g = NULL; - key_state->state = libssh2_NB_state_idle; - - return ret; -} - - - -/* kex_method_diffie_hellman_group14_sha1_key_exchange - * Diffie-Hellman Group14 Key Exchange using SHA1 - */ -static 
int -kex_method_diffie_hellman_group14_sha1_key_exchange(LIBSSH2_SESSION *session, - key_exchange_state_low_t - * key_state) -{ - static const unsigned char p_value[256] = { - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xC9, 0x0F, 0xDA, 0xA2, 0x21, 0x68, 0xC2, 0x34, - 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1, - 0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, - 0x02, 0x0B, 0xBE, 0xA6, 0x3B, 0x13, 0x9B, 0x22, - 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD, - 0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, - 0x30, 0x2B, 0x0A, 0x6D, 0xF2, 0x5F, 0x14, 0x37, - 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45, - 0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, - 0xF4, 0x4C, 0x42, 0xE9, 0xA6, 0x37, 0xED, 0x6B, - 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED, - 0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, - 0xAE, 0x9F, 0x24, 0x11, 0x7C, 0x4B, 0x1F, 0xE6, - 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D, - 0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05, - 0x98, 0xDA, 0x48, 0x36, 0x1C, 0x55, 0xD3, 0x9A, - 0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F, - 0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96, - 0x1C, 0x62, 0xF3, 0x56, 0x20, 0x85, 0x52, 0xBB, - 0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D, - 0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04, - 0xF1, 0x74, 0x6C, 0x08, 0xCA, 0x18, 0x21, 0x7C, - 0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B, - 0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03, - 0x9B, 0x27, 0x83, 0xA2, 0xEC, 0x07, 0xA2, 0x8F, - 0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9, - 0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18, - 0x39, 0x95, 0x49, 0x7C, 0xEA, 0x95, 0x6A, 0xE5, - 0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10, - 0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAC, 0xAA, 0x68, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF - }; - int ret; - - if (key_state->state == libssh2_NB_state_idle) { - key_state->p = _libssh2_bn_init_from_bin(); /* SSH2 defined value (p_value) */ - key_state->g = _libssh2_bn_init(); /* SSH2 defined value (2) */ - - /* g 
== 2 */ - /* Initialize P and G */ - _libssh2_bn_set_word(key_state->g, 2); - _libssh2_bn_from_bin(key_state->p, 256, p_value); - - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group14 Key Exchange"); + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, key, + session->local.mac-> + key_len, "E"); - key_state->state = libssh2_NB_state_created; - } - ret = diffie_hellman_sha1(session, key_state->g, key_state->p, - 256, SSH_MSG_KEXDH_INIT, SSH_MSG_KEXDH_REPLY, - NULL, 0, &key_state->exchange_state); - if (ret == LIBSSH2_ERROR_EAGAIN) { - return ret; - } + if(!key) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + session->local.mac->init(session, key, &free_key, + &session->local.mac_abstract); - key_state->state = libssh2_NB_state_idle; - _libssh2_bn_free(key_state->p); - key_state->p = NULL; - _libssh2_bn_free(key_state->g); - key_state->g = NULL; + if(free_key) { + _libssh2_explicit_zero(key, session->local.mac->key_len); + LIBSSH2_FREE(session, key); + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Client to Server HMAC Key calculated"); - return ret; -} + if(session->remote.mac->dtor) { + session->remote.mac->dtor(session, &session->remote.mac_abstract); + } + if(session->remote.mac->init) { + unsigned char *key = NULL; + int free_key = 0; + LIBSSH2_KEX_METHOD_SHA_VALUE_HASH(256, key, + session->remote.mac-> + key_len, "F"); -/* kex_method_diffie_hellman_group_exchange_sha1_key_exchange - * Diffie-Hellman Group Exchange Key Exchange using SHA1 - * Negotiates random(ish) group for secret derivation - */ -static int -kex_method_diffie_hellman_group_exchange_sha1_key_exchange -(LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state) -{ - unsigned long p_len, g_len; - int ret = 0; - int rc; + if(!key) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + session->remote.mac->init(session, key, &free_key, + &session->remote.mac_abstract); - if (key_state->state == libssh2_NB_state_idle) { - key_state->p = 
_libssh2_bn_init_from_bin(); - key_state->g = _libssh2_bn_init_from_bin(); - /* Ask for a P and G pair */ -#ifdef LIBSSH2_DH_GEX_NEW - key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST; - _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_MINGROUP); - _libssh2_htonu32(key_state->request + 5, LIBSSH2_DH_GEX_OPTGROUP); - _libssh2_htonu32(key_state->request + 9, LIBSSH2_DH_GEX_MAXGROUP); - key_state->request_len = 13; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group-Exchange (New Method)"); -#else - key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST_OLD; - _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_OPTGROUP); - key_state->request_len = 5; + if(free_key) { + _libssh2_explicit_zero(key, session->remote.mac->key_len); + LIBSSH2_FREE(session, key); + } + } _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group-Exchange (Old Method)"); -#endif + "Server to Client HMAC Key calculated"); - key_state->state = libssh2_NB_state_created; - } + /* Initialize compression for each direction */ - if (key_state->state == libssh2_NB_state_created) { - rc = _libssh2_transport_send(session, key_state->request, - key_state->request_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return rc; - } else if (rc) { - ret = _libssh2_error(session, rc, - "Unable to send Group Exchange Request"); - goto dh_gex_clean_exit; + /* Cleanup any existing compression */ + if(session->local.comp && session->local.comp->dtor) { + session->local.comp->dtor(session, 1, + &session->local.comp_abstract); } - key_state->state = libssh2_NB_state_sent; - } + if(session->local.comp && session->local.comp->init) { + if(session->local.comp->init(session, 1, + &session->local.comp_abstract)) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Client to Server compression initialized"); - if (key_state->state == libssh2_NB_state_sent) { - rc = _libssh2_packet_require(session, 
SSH_MSG_KEX_DH_GEX_GROUP, - &key_state->data, &key_state->data_len, - 0, NULL, 0, &key_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return rc; - } else if (rc) { - ret = _libssh2_error(session, rc, - "Timeout waiting for GEX_GROUP reply"); - goto dh_gex_clean_exit; + if(session->remote.comp && session->remote.comp->dtor) { + session->remote.comp->dtor(session, 0, + &session->remote.comp_abstract); } - key_state->state = libssh2_NB_state_sent1; + if(session->remote.comp && session->remote.comp->init) { + if(session->remote.comp->init(session, 0, + &session->remote.comp_abstract)) { + ret = LIBSSH2_ERROR_KEX_FAILURE; + goto clean_exit; + } + } + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Server to Client compression initialized"); } - if (key_state->state == libssh2_NB_state_sent1) { - unsigned char *s = key_state->data + 1; - p_len = _libssh2_ntohu32(s); - s += 4; - _libssh2_bn_from_bin(key_state->p, p_len, s); - s += p_len; - - g_len = _libssh2_ntohu32(s); - s += 4; - _libssh2_bn_from_bin(key_state->g, g_len, s); - - ret = diffie_hellman_sha1(session, key_state->g, key_state->p, p_len, - SSH_MSG_KEX_DH_GEX_INIT, - SSH_MSG_KEX_DH_GEX_REPLY, - key_state->data + 1, - key_state->data_len - 1, - &key_state->exchange_state); - if (ret == LIBSSH2_ERROR_EAGAIN) { - return ret; - } +clean_exit: + _libssh2_bn_free(exchange_state->k); + exchange_state->k = NULL; - LIBSSH2_FREE(session, key_state->data); + if(exchange_state->k_value) { + LIBSSH2_FREE(session, exchange_state->k_value); + exchange_state->k_value = NULL; } - dh_gex_clean_exit: - key_state->state = libssh2_NB_state_idle; - _libssh2_bn_free(key_state->g); - key_state->g = NULL; - _libssh2_bn_free(key_state->p); - key_state->p = NULL; + exchange_state->state = libssh2_NB_state_idle; return ret; } - - -/* kex_method_diffie_hellman_group_exchange_sha256_key_exchange - * Diffie-Hellman Group Exchange Key Exchange using SHA256 - * Negotiates random(ish) group for secret derivation +/* 
kex_method_curve25519_key_exchange + * + * Elliptic Curve X25519 Key Exchange with SHA256 hash + * */ + static int -kex_method_diffie_hellman_group_exchange_sha256_key_exchange +kex_method_curve25519_key_exchange (LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state) { - unsigned long p_len, g_len; int ret = 0; - int rc; + int rc = 0; - if (key_state->state == libssh2_NB_state_idle) { - key_state->p = _libssh2_bn_init(); - key_state->g = _libssh2_bn_init(); - /* Ask for a P and G pair */ -#ifdef LIBSSH2_DH_GEX_NEW - key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST; - _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_MINGROUP); - _libssh2_htonu32(key_state->request + 5, LIBSSH2_DH_GEX_OPTGROUP); - _libssh2_htonu32(key_state->request + 9, LIBSSH2_DH_GEX_MAXGROUP); - key_state->request_len = 13; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group-Exchange (New Method SHA256)"); -#else - key_state->request[0] = SSH_MSG_KEX_DH_GEX_REQUEST_OLD; - _libssh2_htonu32(key_state->request + 1, LIBSSH2_DH_GEX_OPTGROUP); - key_state->request_len = 5; - _libssh2_debug(session, LIBSSH2_TRACE_KEX, - "Initiating Diffie-Hellman Group-Exchange (Old Method SHA256)"); -#endif + if(key_state->state == libssh2_NB_state_idle) { + key_state->public_key_oct = NULL; key_state->state = libssh2_NB_state_created; } - if (key_state->state == libssh2_NB_state_created) { + if(key_state->state == libssh2_NB_state_created) { + unsigned char *s = NULL; + + rc = strcmp(session->kex->name, "curve25519-sha256@libssh.org"); + if(rc != 0) + rc = strcmp(session->kex->name, "curve25519-sha256"); + + if(rc != 0) { + ret = _libssh2_error(session, -1, + "Unknown KEX curve25519 curve type"); + goto clean_exit; + } + + rc = _libssh2_curve25519_new(session, + &key_state->curve25519_public_key, + &key_state->curve25519_private_key); + + if(rc != 0) { + ret = _libssh2_error(session, rc, + "Unable to create private key"); + goto clean_exit; + } + + key_state->request[0] = 
SSH2_MSG_KEX_ECDH_INIT; + s = key_state->request + 1; + _libssh2_store_str(&s, (const char *)key_state->curve25519_public_key, + LIBSSH2_ED25519_KEY_LEN); + key_state->request_len = LIBSSH2_ED25519_KEY_LEN + 5; + + _libssh2_debug(session, LIBSSH2_TRACE_KEX, + "Initiating curve25519 SHA2"); + + key_state->state = libssh2_NB_state_sent; + } + + if(key_state->state == libssh2_NB_state_sent) { rc = _libssh2_transport_send(session, key_state->request, key_state->request_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { ret = _libssh2_error(session, rc, - "Unable to send Group Exchange Request SHA256"); - goto dh_gex_clean_exit; + "Unable to send ECDH_INIT"); + goto clean_exit; } - key_state->state = libssh2_NB_state_sent; + key_state->state = libssh2_NB_state_sent1; } - if (key_state->state == libssh2_NB_state_sent) { - rc = _libssh2_packet_require(session, SSH_MSG_KEX_DH_GEX_GROUP, + if(key_state->state == libssh2_NB_state_sent1) { + rc = _libssh2_packet_require(session, SSH2_MSG_KEX_ECDH_REPLY, &key_state->data, &key_state->data_len, 0, NULL, 0, &key_state->req_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { ret = _libssh2_error(session, rc, - "Timeout waiting for GEX_GROUP reply SHA256"); - goto dh_gex_clean_exit; + "Timeout waiting for ECDH_REPLY reply"); + goto clean_exit; } - key_state->state = libssh2_NB_state_sent1; + key_state->state = libssh2_NB_state_sent2; } - if (key_state->state == libssh2_NB_state_sent1) { - unsigned char *s = key_state->data + 1; - p_len = _libssh2_ntohu32(s); - s += 4; - _libssh2_bn_from_bin(key_state->p, p_len, s); - s += p_len; - - g_len = _libssh2_ntohu32(s); - s += 4; - _libssh2_bn_from_bin(key_state->g, g_len, s); - - ret = diffie_hellman_sha256(session, key_state->g, key_state->p, p_len, - SSH_MSG_KEX_DH_GEX_INIT, - SSH_MSG_KEX_DH_GEX_REPLY, - key_state->data + 1, - 
key_state->data_len - 1, - &key_state->exchange_state); - if (ret == LIBSSH2_ERROR_EAGAIN) { + if(key_state->state == libssh2_NB_state_sent2) { + + ret = curve25519_sha256(session, key_state->data, key_state->data_len, + key_state->curve25519_public_key, + key_state->curve25519_private_key, + &key_state->exchange_state); + + if(ret == LIBSSH2_ERROR_EAGAIN) { return ret; } LIBSSH2_FREE(session, key_state->data); } - dh_gex_clean_exit: +clean_exit: + + if(key_state->curve25519_public_key) { + _libssh2_explicit_zero(key_state->curve25519_public_key, + LIBSSH2_ED25519_KEY_LEN); + LIBSSH2_FREE(session, key_state->curve25519_public_key); + key_state->curve25519_public_key = NULL; + } + + if(key_state->curve25519_private_key) { + _libssh2_explicit_zero(key_state->curve25519_private_key, + LIBSSH2_ED25519_KEY_LEN); + LIBSSH2_FREE(session, key_state->curve25519_private_key); + key_state->curve25519_private_key = NULL; + } + key_state->state = libssh2_NB_state_idle; - _libssh2_bn_free(key_state->g); - key_state->g = NULL; - _libssh2_bn_free(key_state->p); - key_state->p = NULL; return ret; } +#endif /*LIBSSH2_ED25519*/ + + #define LIBSSH2_KEX_METHOD_FLAG_REQ_ENC_HOSTKEY 0x0001 #define LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY 0x0002 @@ -1693,6 +2956,24 @@ static const LIBSSH2_KEX_METHOD kex_method_diffie_helman_group14_sha1 = { LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, }; +static const LIBSSH2_KEX_METHOD kex_method_diffie_helman_group14_sha256 = { + "diffie-hellman-group14-sha256", + kex_method_diffie_hellman_group14_sha256_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; + +static const LIBSSH2_KEX_METHOD kex_method_diffie_helman_group16_sha512 = { + "diffie-hellman-group16-sha512", + kex_method_diffie_hellman_group16_sha512_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; + +static const LIBSSH2_KEX_METHOD kex_method_diffie_helman_group18_sha512 = { + "diffie-hellman-group18-sha512", + kex_method_diffie_hellman_group18_sha512_key_exchange, + 
LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; + static const LIBSSH2_KEX_METHOD kex_method_diffie_helman_group_exchange_sha1 = { "diffie-hellman-group-exchange-sha1", @@ -1707,12 +2988,74 @@ kex_method_diffie_helman_group_exchange_sha256 = { LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, }; +#if LIBSSH2_ECDSA +static const LIBSSH2_KEX_METHOD +kex_method_ecdh_sha2_nistp256 = { + "ecdh-sha2-nistp256", + kex_method_ecdh_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; + +static const LIBSSH2_KEX_METHOD +kex_method_ecdh_sha2_nistp384 = { + "ecdh-sha2-nistp384", + kex_method_ecdh_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; + +static const LIBSSH2_KEX_METHOD +kex_method_ecdh_sha2_nistp521 = { + "ecdh-sha2-nistp521", + kex_method_ecdh_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; +#endif + +#if LIBSSH2_ED25519 +static const LIBSSH2_KEX_METHOD +kex_method_ssh_curve25519_sha256_libssh = { + "curve25519-sha256@libssh.org", + kex_method_curve25519_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; +static const LIBSSH2_KEX_METHOD +kex_method_ssh_curve25519_sha256 = { + "curve25519-sha256", + kex_method_curve25519_key_exchange, + LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY, +}; +#endif + +/* this kex method signals that client can receive extensions + * as described in https://datatracker.ietf.org/doc/html/rfc8308 +*/ + +static const LIBSSH2_KEX_METHOD +kex_method_extension_negotiation = { + "ext-info-c", + NULL, + 0, +}; + static const LIBSSH2_KEX_METHOD *libssh2_kex_methods[] = { +#if LIBSSH2_ED25519 + &kex_method_ssh_curve25519_sha256, + &kex_method_ssh_curve25519_sha256_libssh, +#endif +#if LIBSSH2_ECDSA + &kex_method_ecdh_sha2_nistp256, + &kex_method_ecdh_sha2_nistp384, + &kex_method_ecdh_sha2_nistp521, +#endif &kex_method_diffie_helman_group_exchange_sha256, - &kex_method_diffie_helman_group_exchange_sha1, + &kex_method_diffie_helman_group16_sha512, + &kex_method_diffie_helman_group18_sha512, + 
&kex_method_diffie_helman_group14_sha256, &kex_method_diffie_helman_group14_sha1, &kex_method_diffie_helman_group1_sha1, - NULL + &kex_method_diffie_helman_group_exchange_sha1, + &kex_method_extension_negotiation, + NULL }; typedef struct _LIBSSH2_COMMON_METHOD @@ -1722,7 +3065,8 @@ typedef struct _LIBSSH2_COMMON_METHOD /* kex_method_strlen * Calculate the length of a particular method list's resulting string - * Includes SUM(strlen() of each individual method plus 1 (for coma)) - 1 (because the last coma isn't used) + * Includes SUM(strlen() of each individual method plus 1 (for coma)) - 1 + * (because the last coma isn't used) * Another sign of bad coding practices gone mad. Pretend you don't see this. */ static size_t @@ -1730,11 +3074,11 @@ kex_method_strlen(LIBSSH2_COMMON_METHOD ** method) { size_t len = 0; - if (!method || !*method) { + if(!method || !*method) { return 0; } - while (*method && (*method)->name) { + while(*method && (*method)->name) { len += strlen((*method)->name) + 1; method++; } @@ -1754,11 +3098,11 @@ kex_method_list(unsigned char *buf, size_t list_strlen, _libssh2_htonu32(buf, list_strlen); buf += 4; - if (!method || !*method) { + if(!method || !*method) { return 4; } - while (*method && (*method)->name) { + while(*method && (*method)->name) { int mlen = strlen((*method)->name); memcpy(buf, (*method)->name, mlen); buf += mlen; @@ -1776,12 +3120,13 @@ kex_method_list(unsigned char *buf, size_t list_strlen, kex_method_strlen((LIBSSH2_COMMON_METHOD**)(defaultvar))) #define LIBSSH2_METHOD_PREFS_STR(buf, prefvarlen, prefvar, defaultvar) \ - if (prefvar) { \ + if(prefvar) { \ _libssh2_htonu32((buf), (prefvarlen)); \ buf += 4; \ memcpy((buf), (prefvar), (prefvarlen)); \ buf += (prefvarlen); \ - } else { \ + } \ + else { \ buf += kex_method_list((buf), (prefvarlen), \ (LIBSSH2_COMMON_METHOD**)(defaultvar)); \ } @@ -1802,7 +3147,7 @@ static int kexinit(LIBSSH2_SESSION * session) unsigned char *data, *s; int rc; - if (session->kexinit_state == 
libssh2_NB_state_idle) { + if(session->kexinit_state == libssh2_NB_state_idle) { kex_len = LIBSSH2_METHOD_PREFS_LEN(session->kex_prefs, libssh2_kex_methods); hostkey_len = @@ -1836,14 +3181,18 @@ static int kexinit(LIBSSH2_SESSION * session) lang_cs_len + lang_sc_len; s = data = LIBSSH2_ALLOC(session, data_len); - if (!data) { + if(!data) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory"); } *(s++) = SSH_MSG_KEXINIT; - _libssh2_random(s, 16); + if(_libssh2_random(s, 16)) { + return _libssh2_error(session, LIBSSH2_ERROR_RANDGEN, + "Unable to get random bytes " + "for KEXINIT cookie"); + } s += 16; /* Ennumerating through these lists twice is probably (certainly?) @@ -1883,7 +3232,7 @@ static int kexinit(LIBSSH2_SESSION * session) #ifdef LIBSSH2DEBUG { /* Funnily enough, they'll all "appear" to be '\0' terminated */ - unsigned char *p = data + 21; /* type(1) + cookie(16) + len(4) */ + unsigned char *p = data + 21; /* type(1) + cookie(16) + len(4) */ _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Sent KEX: %s", p); p += kex_len + 4; @@ -1909,7 +3258,8 @@ static int kexinit(LIBSSH2_SESSION * session) #endif /* LIBSSH2DEBUG */ session->kexinit_state = libssh2_NB_state_created; - } else { + } + else { data = session->kexinit_data; data_len = session->kexinit_data_len; /* zap the variables to ensure there is NOT a double free later */ @@ -1918,12 +3268,12 @@ static int kexinit(LIBSSH2_SESSION * session) } rc = _libssh2_transport_send(session, data, data_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { session->kexinit_data = data; session->kexinit_data_len = data_len; return rc; } - else if (rc) { + else if(rc) { LIBSSH2_FREE(session, data); session->kexinit_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, @@ -1931,7 +3281,7 @@ static int kexinit(LIBSSH2_SESSION * session) } - if (session->local.kexinit) { + if(session->local.kexinit) { LIBSSH2_FREE(session, session->local.kexinit); } @@ 
-1945,33 +3295,49 @@ static int kexinit(LIBSSH2_SESSION * session) /* kex_agree_instr * Kex specific variant of strstr() - * Needle must be preceed by BOL or ',', and followed by ',' or EOL + * Needle must be precede by BOL or ',', and followed by ',' or EOL */ static unsigned char * kex_agree_instr(unsigned char *haystack, unsigned long haystack_len, const unsigned char *needle, unsigned long needle_len) { unsigned char *s; + unsigned char *end_haystack; + unsigned long left; + + if(haystack == NULL || needle == NULL) { + return NULL; + } /* Haystack too short to bother trying */ - if (haystack_len < needle_len) { + if(haystack_len < needle_len || needle_len == 0) { return NULL; } + s = haystack; + end_haystack = &haystack[haystack_len]; + left = end_haystack - s; + /* Needle at start of haystack */ - if ((strncmp((char *) haystack, (char *) needle, needle_len) == 0) && + if((strncmp((char *) haystack, (char *) needle, needle_len) == 0) && (needle_len == haystack_len || haystack[needle_len] == ',')) { return haystack; } - s = haystack; /* Search until we run out of comas or we run out of haystack, whichever comes first */ - while ((s = (unsigned char *) strchr((char *) s, ',')) - && ((haystack_len - (s - haystack)) > needle_len)) { - s++; + while((s = (unsigned char *) memchr((char *) s, ',', left))) { + /* Advance buffer past coma if we can */ + left = end_haystack - s; + if((left >= 1) && (left <= haystack_len) && (left > needle_len)) { + s++; + } + else { + return NULL; + } + /* Needle at X position */ - if ((strncmp((char *) s, (char *) needle, needle_len) == 0) && + if((strncmp((char *) s, (char *) needle, needle_len) == 0) && (((s - haystack) + needle_len) == haystack_len || s[needle_len] == ',')) { return s; @@ -1989,8 +3355,8 @@ static const LIBSSH2_COMMON_METHOD * kex_get_method_by_name(const char *name, size_t name_len, const LIBSSH2_COMMON_METHOD ** methodlist) { - while (*methodlist) { - if ((strlen((*methodlist)->name) == name_len) && + 
while(*methodlist) { + if((strlen((*methodlist)->name) == name_len) && (strncmp((*methodlist)->name, name, name_len) == 0)) { return *methodlist; } @@ -2011,31 +3377,31 @@ static int kex_agree_hostkey(LIBSSH2_SESSION * session, const LIBSSH2_HOSTKEY_METHOD **hostkeyp = libssh2_hostkey_methods(); unsigned char *s; - if (session->hostkey_prefs) { + if(session->hostkey_prefs) { s = (unsigned char *) session->hostkey_prefs; - while (s && *s) { + while(s && *s) { unsigned char *p = (unsigned char *) strchr((char *) s, ','); size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s)); - if (kex_agree_instr(hostkey, hostkey_len, s, method_len)) { + if(kex_agree_instr(hostkey, hostkey_len, s, method_len)) { const LIBSSH2_HOSTKEY_METHOD *method = (const LIBSSH2_HOSTKEY_METHOD *) kex_get_method_by_name((char *) s, method_len, (const LIBSSH2_COMMON_METHOD **) hostkeyp); - if (!method) { + if(!method) { /* Invalid method -- Should never be reached */ return -1; } /* So far so good, but does it suit our purposes? (Encrypting vs Signing) */ - if (((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_ENC_HOSTKEY) == + if(((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_ENC_HOSTKEY) == 0) || (method->encrypt)) { /* Either this hostkey can do encryption or this kex just doesn't require it */ - if (((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY) + if(((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY) == 0) || (method->sig_verify)) { /* Either this hostkey can do signing or this kex just doesn't require it */ @@ -2050,18 +3416,18 @@ static int kex_agree_hostkey(LIBSSH2_SESSION * session, return -1; } - while (hostkeyp && (*hostkeyp) && (*hostkeyp)->name) { + while(hostkeyp && (*hostkeyp) && (*hostkeyp)->name) { s = kex_agree_instr(hostkey, hostkey_len, (unsigned char *) (*hostkeyp)->name, strlen((*hostkeyp)->name)); - if (s) { + if(s) { /* So far so good, but does it suit our purposes? 
(Encrypting vs Signing) */ - if (((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_ENC_HOSTKEY) == 0) || + if(((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_ENC_HOSTKEY) == 0) || ((*hostkeyp)->encrypt)) { /* Either this hostkey can do encryption or this kex just doesn't require it */ - if (((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY) == + if(((kex_flags & LIBSSH2_KEX_METHOD_FLAG_REQ_SIGN_HOSTKEY) == 0) || ((*hostkeyp)->sig_verify)) { /* Either this hostkey can do signing or this kex just doesn't require it */ @@ -2088,19 +3454,20 @@ static int kex_agree_kex_hostkey(LIBSSH2_SESSION * session, unsigned char *kex, const LIBSSH2_KEX_METHOD **kexp = libssh2_kex_methods; unsigned char *s; - if (session->kex_prefs) { + if(session->kex_prefs) { s = (unsigned char *) session->kex_prefs; - while (s && *s) { + while(s && *s) { unsigned char *q, *p = (unsigned char *) strchr((char *) s, ','); size_t method_len = (p ? (size_t)(p - s) : strlen((char *) s)); - if ((q = kex_agree_instr(kex, kex_len, s, method_len))) { + q = kex_agree_instr(kex, kex_len, s, method_len); + if(q) { const LIBSSH2_KEX_METHOD *method = (const LIBSSH2_KEX_METHOD *) kex_get_method_by_name((char *) s, method_len, (const LIBSSH2_COMMON_METHOD **) kexp); - if (!method) { + if(!method) { /* Invalid method -- Should never be reached */ return -1; } @@ -2108,13 +3475,13 @@ static int kex_agree_kex_hostkey(LIBSSH2_SESSION * session, unsigned char *kex, /* We've agreed on a key exchange method, * Can we agree on a hostkey that works with this kex? 
*/ - if (kex_agree_hostkey(session, method->flags, hostkey, + if(kex_agree_hostkey(session, method->flags, hostkey, hostkey_len) == 0) { session->kex = method; - if (session->burn_optimistic_kexinit && (kex == q)) { - /* Server sent an optimistic packet, - * and client agrees with preference - * cancel burning the first KEX_INIT packet that comes in */ + if(session->burn_optimistic_kexinit && (kex == q)) { + /* Server sent an optimistic packet, and client agrees + * with preference cancel burning the first KEX_INIT + * packet that comes in */ session->burn_optimistic_kexinit = 0; } return 0; @@ -2126,21 +3493,21 @@ static int kex_agree_kex_hostkey(LIBSSH2_SESSION * session, unsigned char *kex, return -1; } - while (*kexp && (*kexp)->name) { + while(*kexp && (*kexp)->name) { s = kex_agree_instr(kex, kex_len, (unsigned char *) (*kexp)->name, strlen((*kexp)->name)); - if (s) { + if(s) { /* We've agreed on a key exchange method, * Can we agree on a hostkey that works with this kex? */ - if (kex_agree_hostkey(session, (*kexp)->flags, hostkey, + if(kex_agree_hostkey(session, (*kexp)->flags, hostkey, hostkey_len) == 0) { session->kex = *kexp; - if (session->burn_optimistic_kexinit && (kex == s)) { - /* Server sent an optimistic packet, - * and client agrees with preference - * cancel burning the first KEX_INIT packet that comes in */ + if(session->burn_optimistic_kexinit && (kex == s)) { + /* Server sent an optimistic packet, and client agrees + * with preference cancel burning the first KEX_INIT + * packet that comes in */ session->burn_optimistic_kexinit = 0; } return 0; @@ -2166,21 +3533,21 @@ static int kex_agree_crypt(LIBSSH2_SESSION * session, (void) session; - if (endpoint->crypt_prefs) { + if(endpoint->crypt_prefs) { s = (unsigned char *) endpoint->crypt_prefs; - while (s && *s) { + while(s && *s) { unsigned char *p = (unsigned char *) strchr((char *) s, ','); size_t method_len = (p ? 
(size_t)(p - s) : strlen((char *) s)); - if (kex_agree_instr(crypt, crypt_len, s, method_len)) { + if(kex_agree_instr(crypt, crypt_len, s, method_len)) { const LIBSSH2_CRYPT_METHOD *method = (const LIBSSH2_CRYPT_METHOD *) kex_get_method_by_name((char *) s, method_len, (const LIBSSH2_COMMON_METHOD **) cryptp); - if (!method) { + if(!method) { /* Invalid method -- Should never be reached */ return -1; } @@ -2194,11 +3561,11 @@ static int kex_agree_crypt(LIBSSH2_SESSION * session, return -1; } - while (*cryptp && (*cryptp)->name) { + while(*cryptp && (*cryptp)->name) { s = kex_agree_instr(crypt, crypt_len, (unsigned char *) (*cryptp)->name, strlen((*cryptp)->name)); - if (s) { + if(s) { endpoint->crypt = *cryptp; return 0; } @@ -2221,20 +3588,20 @@ static int kex_agree_mac(LIBSSH2_SESSION * session, unsigned char *s; (void) session; - if (endpoint->mac_prefs) { + if(endpoint->mac_prefs) { s = (unsigned char *) endpoint->mac_prefs; - while (s && *s) { + while(s && *s) { unsigned char *p = (unsigned char *) strchr((char *) s, ','); size_t method_len = (p ? 
(size_t)(p - s) : strlen((char *) s)); - if (kex_agree_instr(mac, mac_len, s, method_len)) { + if(kex_agree_instr(mac, mac_len, s, method_len)) { const LIBSSH2_MAC_METHOD *method = (const LIBSSH2_MAC_METHOD *) kex_get_method_by_name((char *) s, method_len, (const LIBSSH2_COMMON_METHOD **) macp); - if (!method) { + if(!method) { /* Invalid method -- Should never be reached */ return -1; } @@ -2248,10 +3615,10 @@ static int kex_agree_mac(LIBSSH2_SESSION * session, return -1; } - while (*macp && (*macp)->name) { + while(*macp && (*macp)->name) { s = kex_agree_instr(mac, mac_len, (unsigned char *) (*macp)->name, strlen((*macp)->name)); - if (s) { + if(s) { endpoint->mac = *macp; return 0; } @@ -2274,21 +3641,21 @@ static int kex_agree_comp(LIBSSH2_SESSION *session, unsigned char *s; (void) session; - if (endpoint->comp_prefs) { + if(endpoint->comp_prefs) { s = (unsigned char *) endpoint->comp_prefs; - while (s && *s) { + while(s && *s) { unsigned char *p = (unsigned char *) strchr((char *) s, ','); size_t method_len = (p ? 
(size_t)(p - s) : strlen((char *) s)); - if (kex_agree_instr(comp, comp_len, s, method_len)) { + if(kex_agree_instr(comp, comp_len, s, method_len)) { const LIBSSH2_COMP_METHOD *method = (const LIBSSH2_COMP_METHOD *) kex_get_method_by_name((char *) s, method_len, (const LIBSSH2_COMMON_METHOD **) compp); - if (!method) { + if(!method) { /* Invalid method -- Should never be reached */ return -1; } @@ -2302,10 +3669,10 @@ static int kex_agree_comp(LIBSSH2_SESSION *session, return -1; } - while (*compp && (*compp)->name) { + while(*compp && (*compp)->name) { s = kex_agree_instr(comp, comp_len, (unsigned char *) (*compp)->name, strlen((*compp)->name)); - if (s) { + if(s) { endpoint->comp = *compp; return 0; } @@ -2316,35 +3683,10 @@ static int kex_agree_comp(LIBSSH2_SESSION *session, } - /* TODO: When in server mode we need to turn this logic on its head * The Client gets to make the final call on "agreed methods" */ -/* - * kex_string_pair() extracts a string from the packet and makes sure it fits - * within the given packet. 
- */ -static int kex_string_pair(unsigned char **sp, /* parsing position */ - unsigned char *data, /* start pointer to packet */ - size_t data_len, /* size of total packet */ - size_t *lenp, /* length of the string */ - unsigned char **strp) /* pointer to string start */ -{ - unsigned char *s = *sp; - *lenp = _libssh2_ntohu32(s); - - /* the length of the string must fit within the current pointer and the - end of the packet */ - if (*lenp > (data_len - (s - data) -4)) - return 1; - *strp = s + 4; - s += 4 + *lenp; - - *sp = s; - return 0; -} - /* kex_agree_methods * Decide which specific method to use of the methods offered by each party */ @@ -2355,62 +3697,71 @@ static int kex_agree_methods(LIBSSH2_SESSION * session, unsigned char *data, *mac_cs, *mac_sc; size_t kex_len, hostkey_len, crypt_cs_len, crypt_sc_len, comp_cs_len; size_t comp_sc_len, mac_cs_len, mac_sc_len; - unsigned char *s = data; + struct string_buf buf; + + if(data_len < 17) + return -1; - /* Skip packet_type, we know it already */ - s++; + buf.data = (unsigned char *)data; + buf.len = data_len; + buf.dataptr = buf.data; + buf.dataptr++; /* advance past packet type */ /* Skip cookie, don't worry, it's preserved in the kexinit field */ - s += 16; + buf.dataptr += 16; /* Locate each string */ - if(kex_string_pair(&s, data, data_len, &kex_len, &kex)) + if(_libssh2_get_string(&buf, &kex, &kex_len)) return -1; - if(kex_string_pair(&s, data, data_len, &hostkey_len, &hostkey)) + if(_libssh2_get_string(&buf, &hostkey, &hostkey_len)) return -1; - if(kex_string_pair(&s, data, data_len, &crypt_cs_len, &crypt_cs)) + if(_libssh2_get_string(&buf, &crypt_cs, &crypt_cs_len)) return -1; - if(kex_string_pair(&s, data, data_len, &crypt_sc_len, &crypt_sc)) + if(_libssh2_get_string(&buf, &crypt_sc, &crypt_sc_len)) return -1; - if(kex_string_pair(&s, data, data_len, &mac_cs_len, &mac_cs)) + if(_libssh2_get_string(&buf, &mac_cs, &mac_cs_len)) return -1; - if(kex_string_pair(&s, data, data_len, &mac_sc_len, &mac_sc)) + 
if(_libssh2_get_string(&buf, &mac_sc, &mac_sc_len)) return -1; - if(kex_string_pair(&s, data, data_len, &comp_cs_len, &comp_cs)) + if(_libssh2_get_string(&buf, &comp_cs, &comp_cs_len)) return -1; - if(kex_string_pair(&s, data, data_len, &comp_sc_len, &comp_sc)) + if(_libssh2_get_string(&buf, &comp_sc, &comp_sc_len)) return -1; /* If the server sent an optimistic packet, assume that it guessed wrong. * If the guess is determined to be right (by kex_agree_kex_hostkey) * This flag will be reset to zero so that it's not ignored */ - session->burn_optimistic_kexinit = *(s++); - /* Next uint32 in packet is all zeros (reserved) */ + if(_libssh2_check_length(&buf, 1)) { + session->burn_optimistic_kexinit = *(buf.dataptr++); + } + else { + return -1; + } - if (data_len < (unsigned) (s - data)) - return -1; /* short packet */ + /* Next uint32 in packet is all zeros (reserved) */ - if (kex_agree_kex_hostkey(session, kex, kex_len, hostkey, hostkey_len)) { + if(kex_agree_kex_hostkey(session, kex, kex_len, hostkey, hostkey_len)) { return -1; } - if (kex_agree_crypt(session, &session->local, crypt_cs, crypt_cs_len) - || kex_agree_crypt(session, &session->remote, crypt_sc, crypt_sc_len)) { + if(kex_agree_crypt(session, &session->local, crypt_cs, crypt_cs_len) + || kex_agree_crypt(session, &session->remote, crypt_sc, + crypt_sc_len)) { return -1; } - if (kex_agree_mac(session, &session->local, mac_cs, mac_cs_len) || + if(kex_agree_mac(session, &session->local, mac_cs, mac_cs_len) || kex_agree_mac(session, &session->remote, mac_sc, mac_sc_len)) { return -1; } - if (kex_agree_comp(session, &session->local, comp_cs, comp_cs_len) || + if(kex_agree_comp(session, &session->local, comp_cs, comp_cs_len) || kex_agree_comp(session, &session->remote, comp_sc, comp_sc_len)) { return -1; } #if 0 - if (libssh2_kex_agree_lang(session, &session->local, lang_cs, lang_cs_len) + if(libssh2_kex_agree_lang(session, &session->local, lang_cs, lang_cs_len) || libssh2_kex_agree_lang(session, 
&session->remote, lang_sc, lang_sc_len)) { return -1; @@ -2454,14 +3805,14 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, session->state |= LIBSSH2_STATE_KEX_ACTIVE; - if (key_state->state == libssh2_NB_state_idle) { + if(key_state->state == libssh2_NB_state_idle) { /* Prevent loop in packet_add() */ session->state |= LIBSSH2_STATE_EXCHANGING_KEYS; - if (reexchange) { + if(reexchange) { session->kex = NULL; - if (session->hostkey && session->hostkey->dtor) { + if(session->hostkey && session->hostkey->dtor) { session->hostkey->dtor(session, &session->server_hostkey_abstract); } @@ -2471,8 +3822,8 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, key_state->state = libssh2_NB_state_created; } - if (!session->kex || !session->hostkey) { - if (key_state->state == libssh2_NB_state_created) { + if(!session->kex || !session->hostkey) { + if(key_state->state == libssh2_NB_state_created) { /* Preserve in case of failure */ key_state->oldlocal = session->local.kexinit; key_state->oldlocal_len = session->local.kexinit_len; @@ -2482,12 +3833,13 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, key_state->state = libssh2_NB_state_sent; } - if (key_state->state == libssh2_NB_state_sent) { + if(key_state->state == libssh2_NB_state_sent) { retcode = kexinit(session); - if (retcode == LIBSSH2_ERROR_EAGAIN) { + if(retcode == LIBSSH2_ERROR_EAGAIN) { session->state &= ~LIBSSH2_STATE_KEX_ACTIVE; return retcode; - } else if (retcode) { + } + else if(retcode) { session->local.kexinit = key_state->oldlocal; session->local.kexinit_len = key_state->oldlocal_len; key_state->state = libssh2_NB_state_idle; @@ -2499,18 +3851,18 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, key_state->state = libssh2_NB_state_sent1; } - if (key_state->state == libssh2_NB_state_sent1) { + if(key_state->state == libssh2_NB_state_sent1) { retcode = _libssh2_packet_require(session, SSH_MSG_KEXINIT, &key_state->data, &key_state->data_len, 
0, NULL, 0, &key_state->req_state); - if (retcode == LIBSSH2_ERROR_EAGAIN) { + if(retcode == LIBSSH2_ERROR_EAGAIN) { session->state &= ~LIBSSH2_STATE_KEX_ACTIVE; return retcode; } - else if (retcode) { - if (session->local.kexinit) { + else if(retcode) { + if(session->local.kexinit) { LIBSSH2_FREE(session, session->local.kexinit); } session->local.kexinit = key_state->oldlocal; @@ -2521,42 +3873,45 @@ _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, return -1; } - if (session->remote.kexinit) { + if(session->remote.kexinit) { LIBSSH2_FREE(session, session->remote.kexinit); } session->remote.kexinit = key_state->data; session->remote.kexinit_len = key_state->data_len; - if (kex_agree_methods(session, key_state->data, + if(kex_agree_methods(session, key_state->data, key_state->data_len)) rc = LIBSSH2_ERROR_KEX_FAILURE; key_state->state = libssh2_NB_state_sent2; } - } else { + } + else { key_state->state = libssh2_NB_state_sent2; } - if (rc == 0 && session->kex) { - if (key_state->state == libssh2_NB_state_sent2) { + if(rc == 0 && session->kex) { + if(key_state->state == libssh2_NB_state_sent2) { retcode = session->kex->exchange_keys(session, &key_state->key_state_low); - if (retcode == LIBSSH2_ERROR_EAGAIN) { + if(retcode == LIBSSH2_ERROR_EAGAIN) { session->state &= ~LIBSSH2_STATE_KEX_ACTIVE; return retcode; - } else if (retcode) { - rc = _libssh2_error(session, LIBSSH2_ERROR_KEY_EXCHANGE_FAILURE, + } + else if(retcode) { + rc = _libssh2_error(session, + LIBSSH2_ERROR_KEY_EXCHANGE_FAILURE, "Unrecoverable error exchanging keys"); } } } /* Done with kexinit buffers */ - if (session->local.kexinit) { + if(session->local.kexinit) { LIBSSH2_FREE(session, session->local.kexinit); session->local.kexinit = NULL; } - if (session->remote.kexinit) { + if(session->remote.kexinit) { LIBSSH2_FREE(session, session->remote.kexinit); session->remote.kexinit = NULL; } @@ -2582,7 +3937,7 @@ libssh2_session_method_pref(LIBSSH2_SESSION * session, int method_type, int 
prefs_len = strlen(prefs); const LIBSSH2_COMMON_METHOD **mlist; - switch (method_type) { + switch(method_type) { case LIBSSH2_METHOD_KEX: prefvar = &session->kex_prefs; mlist = (const LIBSSH2_COMMON_METHOD **) libssh2_kex_methods; @@ -2635,46 +3990,54 @@ libssh2_session_method_pref(LIBSSH2_SESSION * session, int method_type, mlist = NULL; break; + case LIBSSH2_METHOD_SIGN_ALGO: + prefvar = &session->sign_algo_prefs; + mlist = NULL; + break; + default: return _libssh2_error(session, LIBSSH2_ERROR_INVAL, "Invalid parameter specified for method_type"); } s = newprefs = LIBSSH2_ALLOC(session, prefs_len + 1); - if (!newprefs) { + if(!newprefs) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Error allocated space for method preferences"); } memcpy(s, prefs, prefs_len + 1); - while (s && *s && mlist) { + while(s && *s && mlist) { char *p = strchr(s, ','); int method_len = p ? (p - s) : (int) strlen(s); - if (!kex_get_method_by_name(s, method_len, mlist)) { + if(!kex_get_method_by_name(s, method_len, mlist)) { /* Strip out unsupported method */ - if (p) { + if(p) { memcpy(s, p + 1, strlen(s) - method_len); - } else { - if (s > newprefs) { + } + else { + if(s > newprefs) { *(--s) = '\0'; - } else { + } + else { *s = '\0'; } } } - - s = p ? (p + 1) : NULL; + else { + s = p ? 
(p + 1) : NULL; + } } - if (strlen(newprefs) == 0) { + if(!*newprefs) { LIBSSH2_FREE(session, newprefs); return _libssh2_error(session, LIBSSH2_ERROR_METHOD_NOT_SUPPORTED, "The requested method(s) are not currently " "supported"); } - if (*prefvar) { + if(*prefvar) { LIBSSH2_FREE(session, *prefvar); } *prefvar = newprefs; @@ -2690,7 +4053,7 @@ libssh2_session_method_pref(LIBSSH2_SESSION * session, int method_type, LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, int method_type, - const char*** algs) + const char ***algs) { unsigned int i; unsigned int j; @@ -2698,11 +4061,11 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, const LIBSSH2_COMMON_METHOD **mlist; /* to prevent coredumps due to dereferencing of NULL */ - if (NULL == algs) + if(NULL == algs) return _libssh2_error(session, LIBSSH2_ERROR_BAD_USE, "algs must not be NULL"); - switch (method_type) { + switch(method_type) { case LIBSSH2_METHOD_KEX: mlist = (const LIBSSH2_COMMON_METHOD **) libssh2_kex_methods; break; @@ -2723,7 +4086,13 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, case LIBSSH2_METHOD_COMP_CS: case LIBSSH2_METHOD_COMP_SC: - mlist = (const LIBSSH2_COMMON_METHOD **) _libssh2_comp_methods(session); + mlist = (const LIBSSH2_COMMON_METHOD **) + _libssh2_comp_methods(session); + break; + + case LIBSSH2_METHOD_SIGN_ALGO: + /* no built-in supported list due to backend support */ + mlist = NULL; break; default: @@ -2732,7 +4101,7 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, } /* switch */ /* weird situation */ - if (NULL==mlist) + if(NULL == mlist) return _libssh2_error(session, LIBSSH2_ERROR_INVAL, "No algorithm found"); @@ -2741,7 +4110,7 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, supported algorithms (needed to allocate the proper size of array) and the second time to actually copy the pointers. 
Typically this function will not be called often (typically at the beginning of a session) and - the number of algorithms (i.e. niumber of iterations in one loop) will + the number of algorithms (i.e. number of iterations in one loop) will not be high (typically it will not exceed 20) for quite a long time. So double looping really shouldn't be an issue and it is definitely a @@ -2749,28 +4118,28 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, */ /* count the number of supported algorithms */ - for ( i=0, ialg=0; NULL!=mlist[i]; i++) { + for(i = 0, ialg = 0; NULL != mlist[i]; i++) { /* do not count fields with NULL name */ - if (mlist[i]->name) + if(mlist[i]->name) ialg++; } /* weird situation, no algorithm found */ - if (0==ialg) + if(0 == ialg) return _libssh2_error(session, LIBSSH2_ERROR_INVAL, "No algorithm found"); /* allocate buffer */ - *algs = (const char**) LIBSSH2_ALLOC(session, ialg*sizeof(const char*)); - if ( NULL==*algs ) { + *algs = (const char **) LIBSSH2_ALLOC(session, ialg*sizeof(const char *)); + if(NULL == *algs) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Memory allocation failed"); } /* Past this point *algs must be deallocated in case of an error!! */ /* copy non-NULL pointers only */ - for ( i=0, j=0; NULL!=mlist[i] && jname ){ + for(i = 0, j = 0; NULL != mlist[i] && j < ialg; i++) { + if(NULL == mlist[i]->name) { /* maybe a weird situation but if it occurs, do not include NULL pointers */ continue; @@ -2781,7 +4150,7 @@ LIBSSH2_API int libssh2_session_supported_algs(LIBSSH2_SESSION* session, } /* correct number of pointers copied? 
(test the code above) */ - if ( j!=ialg ) { + if(j != ialg) { /* deallocate buffer */ LIBSSH2_FREE(session, (void *)*algs); *algs = NULL; diff --git a/vendor/libssh2/src/knownhost.c b/vendor/libssh2/src/knownhost.c index a32dcf8764..77798fbfde 100644 --- a/vendor/libssh2/src/knownhost.c +++ b/vendor/libssh2/src/knownhost.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2014 by Daniel Stenberg + * Copyright (c) 2009-2019 by Daniel Stenberg * All rights reserved. * * Redistribution and use in source and binary forms, @@ -71,7 +71,7 @@ static void free_host(LIBSSH2_SESSION *session, struct known_host *entry) if(entry) { if(entry->comment) LIBSSH2_FREE(session, entry->comment); - if (entry->key_type_name) + if(entry->key_type_name) LIBSSH2_FREE(session, entry->key_type_name); if(entry->key) LIBSSH2_FREE(session, entry->key); @@ -149,7 +149,8 @@ knownhost_add(LIBSSH2_KNOWNHOSTS *hosts, return _libssh2_error(hosts->session, LIBSSH2_ERROR_INVAL, "No key type set"); - if(!(entry = LIBSSH2_CALLOC(hosts->session, sizeof(struct known_host)))) + entry = LIBSSH2_CALLOC(hosts->session, sizeof(struct known_host)); + if(!entry) return _libssh2_error(hosts->session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for known host " "entry"); @@ -159,13 +160,13 @@ knownhost_add(LIBSSH2_KNOWNHOSTS *hosts, switch(entry->typemask & LIBSSH2_KNOWNHOST_TYPE_MASK) { case LIBSSH2_KNOWNHOST_TYPE_PLAIN: case LIBSSH2_KNOWNHOST_TYPE_CUSTOM: - entry->name = LIBSSH2_ALLOC(hosts->session, hostlen+1); + entry->name = LIBSSH2_ALLOC(hosts->session, hostlen + 1); if(!entry->name) { rc = _libssh2_error(hosts->session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for host name"); goto error; } - memcpy(entry->name, host, hostlen+1); + memcpy(entry->name, host, hostlen + 1); entry->name_len = hostlen; break; case LIBSSH2_KNOWNHOST_TYPE_SHA1: @@ -193,14 +194,14 @@ knownhost_add(LIBSSH2_KNOWNHOSTS *hosts, /* the provided key is base64 encoded already */ if(!keylen) keylen = strlen(key); - entry->key = 
LIBSSH2_ALLOC(hosts->session, keylen+1); + entry->key = LIBSSH2_ALLOC(hosts->session, keylen + 1); if(!entry->key) { rc = _libssh2_error(hosts->session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for key"); goto error; } - memcpy(entry->key, key, keylen+1); - entry->key[keylen]=0; /* force a terminating zero trailer */ + memcpy(entry->key, key, keylen + 1); + entry->key[keylen] = 0; /* force a terminating zero trailer */ } else { /* key is raw, we base64 encode it and store it as such */ @@ -216,28 +217,28 @@ knownhost_add(LIBSSH2_KNOWNHOSTS *hosts, entry->key = ptr; } - if (key_type_name && ((typemask & LIBSSH2_KNOWNHOST_KEY_MASK) == + if(key_type_name && ((typemask & LIBSSH2_KNOWNHOST_KEY_MASK) == LIBSSH2_KNOWNHOST_KEY_UNKNOWN)) { - entry->key_type_name = LIBSSH2_ALLOC(hosts->session, key_type_len+1); - if (!entry->key_type_name) { + entry->key_type_name = LIBSSH2_ALLOC(hosts->session, key_type_len + 1); + if(!entry->key_type_name) { rc = _libssh2_error(hosts->session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for key type"); goto error; } memcpy(entry->key_type_name, key_type_name, key_type_len); - entry->key_type_name[key_type_len]=0; + entry->key_type_name[key_type_len] = 0; entry->key_type_len = key_type_len; } - if (comment) { - entry->comment = LIBSSH2_ALLOC(hosts->session, commentlen+1); + if(comment) { + entry->comment = LIBSSH2_ALLOC(hosts->session, commentlen + 1); if(!entry->comment) { rc = _libssh2_error(hosts->session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for comment"); goto error; } - memcpy(entry->comment, comment, commentlen+1); - entry->comment[commentlen]=0; /* force a terminating zero trailer */ + memcpy(entry->comment, comment, commentlen + 1); + entry->comment[commentlen] = 0; /* force a terminating zero trailer */ entry->comment_len = commentlen; } else { @@ -370,7 +371,7 @@ knownhost_check(LIBSSH2_KNOWNHOSTS *hosts, plain 'host' */ if(port >= 0) { int len = snprintf(hostbuff, sizeof(hostbuff), "[%s]:%d", hostp, port); 
- if (len < 0 || len >= (int)sizeof(hostbuff)) { + if(len < 0 || len >= (int)sizeof(hostbuff)) { _libssh2_error(hosts->session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, "Known-host write buffer too small"); @@ -401,7 +402,7 @@ knownhost_check(LIBSSH2_KNOWNHOSTS *hosts, do { node = _libssh2_list_first(&hosts->head); - while (node) { + while(node) { switch(node->typemask & LIBSSH2_KNOWNHOST_TYPE_MASK) { case LIBSSH2_KNOWNHOST_TYPE_PLAIN: if(type == LIBSSH2_KNOWNHOST_TYPE_PLAIN) @@ -450,13 +451,13 @@ knownhost_check(LIBSSH2_KNOWNHOSTS *hosts, - if key_type is set to zero, ignore it an match always - otherwise match when both key types are equal */ - if ( (host_key_type != LIBSSH2_KNOWNHOST_KEY_UNKNOWN ) && - ( (host_key_type == 0) || - (host_key_type == known_key_type) ) ) { + if(host_key_type != LIBSSH2_KNOWNHOST_KEY_UNKNOWN && + (host_key_type == 0 || + host_key_type == known_key_type)) { /* host name and key type match, now compare the keys */ if(!strcmp(key, node->key)) { /* they match! */ - if (ext) + if(ext) *ext = knownhost_to_external(node); badkey = NULL; rc = LIBSSH2_KNOWNHOST_CHECK_MATCH; @@ -472,14 +473,14 @@ knownhost_check(LIBSSH2_KNOWNHOSTS *hosts, } match = 0; /* don't count this as a match anymore */ } - node= _libssh2_list_next(&node->node); + node = _libssh2_list_next(&node->node); } host = hostp; } while(!match && --numcheck); if(badkey) { /* key mismatch */ - if (ext) + if(ext) *ext = knownhost_to_external(badkey); rc = LIBSSH2_KNOWNHOST_CHECK_MISMATCH; } @@ -646,7 +647,7 @@ static int oldstyle_hostline(LIBSSH2_KNOWNHOSTS *hosts, /* copy host name to the temp buffer and zero terminate */ memcpy(hostbuf, name, namelen); - hostbuf[namelen]=0; + hostbuf[namelen] = 0; rc = knownhost_add(hosts, hostbuf, NULL, key_type_name, key_type_len, @@ -685,7 +686,7 @@ static int hashed_hostline(LIBSSH2_KNOWNHOSTS *hosts, for(p = salt; *p && (*p != '|'); p++) ; - if(*p=='|') { + if(*p == '|') { const char *hash = NULL; size_t saltlen = p - salt; if(saltlen >= 
(sizeof(saltbuf)-1)) /* weird length */ @@ -698,11 +699,11 @@ static int hashed_hostline(LIBSSH2_KNOWNHOSTS *hosts, saltbuf[saltlen] = 0; /* zero terminate */ salt = saltbuf; /* point to the stack based buffer */ - hash = p+1; /* the host hash is after the separator */ + hash = p + 1; /* the host hash is after the separator */ /* now make the host point to the hash */ host = hash; - hostlen -= saltlen+1; /* deduct the salt and separator */ + hostlen -= saltlen + 1; /* deduct the salt and separator */ /* check that the lengths seem sensible */ if(hostlen >= sizeof(hostbuf)-1) @@ -712,7 +713,7 @@ static int hashed_hostline(LIBSSH2_KNOWNHOSTS *hosts, "(unexpected length)"); memcpy(hostbuf, host, hostlen); - hostbuf[hostlen]=0; + hostbuf[hostlen] = 0; return knownhost_add(hosts, hostbuf, salt, key_type_name, key_type_len, @@ -766,17 +767,25 @@ static int hostline(LIBSSH2_KNOWNHOSTS *hosts, default: key_type_name = key; - while (keylen && *key && + while(keylen && *key && (*key != ' ') && (*key != '\t')) { key++; keylen--; } key_type_len = key - key_type_name; - if (!strncmp(key_type_name, "ssh-dss", key_type_len)) + if(!strncmp(key_type_name, "ssh-dss", key_type_len)) key_type = LIBSSH2_KNOWNHOST_KEY_SSHDSS; - else if (!strncmp(key_type_name, "ssh-rsa", key_type_len)) + else if(!strncmp(key_type_name, "ssh-rsa", key_type_len)) key_type = LIBSSH2_KNOWNHOST_KEY_SSHRSA; + else if(!strncmp(key_type_name, "ecdsa-sha2-nistp256", key_type_len)) + key_type = LIBSSH2_KNOWNHOST_KEY_ECDSA_256; + else if(!strncmp(key_type_name, "ecdsa-sha2-nistp384", key_type_len)) + key_type = LIBSSH2_KNOWNHOST_KEY_ECDSA_384; + else if(!strncmp(key_type_name, "ecdsa-sha2-nistp521", key_type_len)) + key_type = LIBSSH2_KNOWNHOST_KEY_ECDSA_521; + else if(!strncmp(key_type_name, "ssh-ed25519", key_type_len)) + key_type = LIBSSH2_KNOWNHOST_KEY_ED25519; else key_type = LIBSSH2_KNOWNHOST_KEY_UNKNOWN; @@ -800,7 +809,7 @@ static int hostline(LIBSSH2_KNOWNHOSTS *hosts, keylen -= commentlen; /* Distinguish 
empty comment (a space) from no comment (no space) */ - if (commentlen == 0) + if(commentlen == 0) comment = NULL; /* skip whitespaces */ @@ -879,7 +888,7 @@ libssh2_knownhost_readline(LIBSSH2_KNOWNHOSTS *hosts, cp = line; /* skip leading whitespaces */ - while(len && ((*cp==' ') || (*cp == '\t'))) { + while(len && ((*cp == ' ') || (*cp == '\t'))) { cp++; len--; } @@ -892,7 +901,7 @@ libssh2_knownhost_readline(LIBSSH2_KNOWNHOSTS *hosts, hostp = cp; /* move over the host to the separator */ - while(len && *cp && (*cp!=' ') && (*cp != '\t')) { + while(len && *cp && (*cp != ' ') && (*cp != '\t')) { cp++; len--; } @@ -900,7 +909,7 @@ libssh2_knownhost_readline(LIBSSH2_KNOWNHOSTS *hosts, hostlen = cp - hostp; /* the key starts after the whitespaces */ - while(len && *cp && ((*cp==' ') || (*cp == '\t'))) { + while(len && *cp && ((*cp == ' ') || (*cp == '\t'))) { cp++; len--; } @@ -946,7 +955,7 @@ libssh2_knownhost_readfile(LIBSSH2_KNOWNHOSTS *hosts, { FILE *file; int num = 0; - char buf[2048]; + char buf[4092]; if(type != LIBSSH2_KNOWNHOST_FILE_OPENSSH) return _libssh2_error(hosts->session, @@ -954,7 +963,7 @@ libssh2_knownhost_readfile(LIBSSH2_KNOWNHOSTS *hosts, "Unsupported type of known-host information " "store"); - file = fopen(filename, "r"); + file = fopen(filename, FOPEN_READTEXT); if(file) { while(fgets(buf, sizeof(buf), file)) { if(libssh2_knownhost_readline(hosts, buf, strlen(buf), type)) { @@ -1016,13 +1025,30 @@ knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts, key_type_name = "ssh-dss"; key_type_len = 7; break; + case LIBSSH2_KNOWNHOST_KEY_ECDSA_256: + key_type_name = "ecdsa-sha2-nistp256"; + key_type_len = 19; + break; + case LIBSSH2_KNOWNHOST_KEY_ECDSA_384: + key_type_name = "ecdsa-sha2-nistp384"; + key_type_len = 19; + break; + case LIBSSH2_KNOWNHOST_KEY_ECDSA_521: + key_type_name = "ecdsa-sha2-nistp521"; + key_type_len = 19; + break; + case LIBSSH2_KNOWNHOST_KEY_ED25519: + key_type_name = "ssh-ed25519"; + key_type_len = 11; + break; case 
LIBSSH2_KNOWNHOST_KEY_UNKNOWN: key_type_name = node->key_type_name; - if (key_type_name) { + if(key_type_name) { key_type_len = node->key_type_len; break; } /* otherwise fallback to default and error */ + /* FALL-THROUGH */ default: return _libssh2_error(hosts->session, LIBSSH2_ERROR_METHOD_NOT_SUPPORTED, @@ -1033,7 +1059,7 @@ knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts, - Hashed (SHA1) or unhashed hostname - key name or no key name (RSA1) - comment or no comment - + This means there are 2^3 different formats: ("|1|%s|%s %s %s %s\n", salt, hashed_host, key_name, key, comment) ("|1|%s|%s %s %s\n", salt, hashed_host, key_name, key) @@ -1043,7 +1069,7 @@ knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts, ("%s %s %s\n", host, key_name, key) ("%s %s %s\n", host, key, comment) ("%s %s\n", host, key) - + Even if the buffer is too small, we have to set outlen to the number of characters the complete line would have taken. We also don't write anything to the buffer unless we are sure we can write everything to the @@ -1087,10 +1113,10 @@ knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts, if(node->comment && key_type_len) snprintf(buf, buflen, "|1|%s|%s %s %s %s\n", saltalloc, namealloc, key_type_name, node->key, node->comment); - else if (node->comment) + else if(node->comment) snprintf(buf, buflen, "|1|%s|%s %s %s\n", saltalloc, namealloc, node->key, node->comment); - else if (key_type_len) + else if(key_type_len) snprintf(buf, buflen, "|1|%s|%s %s %s\n", saltalloc, namealloc, key_type_name, node->key); else @@ -1109,10 +1135,10 @@ knownhost_writeline(LIBSSH2_KNOWNHOSTS *hosts, if(node->comment && key_type_len) snprintf(buf, buflen, "%s %s %s %s\n", node->name, key_type_name, node->key, node->comment); - else if (node->comment) + else if(node->comment) snprintf(buf, buflen, "%s %s %s\n", node->name, node->key, node->comment); - else if (key_type_len) + else if(key_type_len) snprintf(buf, buflen, "%s %s %s\n", node->name, key_type_name, node->key); else @@ -1168,7 +1194,7 @@ 
libssh2_knownhost_writefile(LIBSSH2_KNOWNHOSTS *hosts, struct known_host *node; FILE *file; int rc = LIBSSH2_ERROR_NONE; - char buffer[2048]; + char buffer[4092]; /* we only support this single file type for now, bail out on all other attempts */ @@ -1178,7 +1204,7 @@ libssh2_knownhost_writefile(LIBSSH2_KNOWNHOSTS *hosts, "Unsupported type of known-host information " "store"); - file = fopen(filename, "w"); + file = fopen(filename, FOPEN_WRITETEXT); if(!file) return _libssh2_error(hosts->session, LIBSSH2_ERROR_FILE, "Failed to open file"); diff --git a/vendor/libssh2/src/libgcrypt.c b/vendor/libssh2/src/libgcrypt.c index 366d007a34..f6e9b64a3c 100644 --- a/vendor/libssh2/src/libgcrypt.c +++ b/vendor/libssh2/src/libgcrypt.c @@ -66,17 +66,18 @@ _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, (void) e2data; (void) e2len; - if (ddata) { + if(ddata) { rc = gcry_sexp_build (rsa, NULL, "(private-key(rsa(n%b)(e%b)(d%b)(q%b)(p%b)(u%b)))", nlen, ndata, elen, edata, dlen, ddata, plen, pdata, qlen, qdata, coefflen, coeffdata); - } else { + } + else { rc = gcry_sexp_build(rsa, NULL, "(public-key(rsa(n%b)(e%b)))", nlen, ndata, elen, edata); } - if (rc) { + if(rc) { *rsa = NULL; return -1; } @@ -99,12 +100,12 @@ _libssh2_rsa_sha1_verify(libssh2_rsa_ctx * rsa, rc = gcry_sexp_build(&s_hash, NULL, "(data (flags pkcs1) (hash sha1 %b))", SHA_DIGEST_LENGTH, hash); - if (rc != 0) { + if(rc != 0) { return -1; } rc = gcry_sexp_build(&s_sig, NULL, "(sig-val(rsa(s %b)))", sig_len, sig); - if (rc != 0) { + if(rc != 0) { gcry_sexp_release(s_hash); return -1; } @@ -130,18 +131,19 @@ _libssh2_dsa_new(libssh2_dsa_ctx ** dsactx, { int rc; - if (x_len) { + if(x_len) { rc = gcry_sexp_build (dsactx, NULL, "(private-key(dsa(p%b)(q%b)(g%b)(y%b)(x%b)))", p_len, p, q_len, q, g_len, g, y_len, y, x_len, x); - } else { + } + else { rc = gcry_sexp_build(dsactx, NULL, "(public-key(dsa(p%b)(q%b)(g%b)(y%b)))", p_len, p, q_len, q, g_len, g, y_len, y); } - if (rc) { + if(rc) { *dsactx = NULL; return -1; } @@ -172,84 
+174,83 @@ _libssh2_rsa_new_private(libssh2_rsa_ctx ** rsa, unsigned char *n, *e, *d, *p, *q, *e1, *e2, *coeff; unsigned int nlen, elen, dlen, plen, qlen, e1len, e2len, coefflen; - (void) passphrase; - - fp = fopen(filename, "r"); - if (!fp) { + fp = fopen(filename, FOPEN_READTEXT); + if(!fp) { return -1; } ret = _libssh2_pem_parse(session, "-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----", + passphrase, fp, &data, &datalen); fclose(fp); - if (ret) { + if(ret) { return -1; } save_data = data; - if (_libssh2_pem_decode_sequence(&data, &datalen)) { + if(_libssh2_pem_decode_sequence(&data, &datalen)) { ret = -1; goto fail; } /* First read Version field (should be 0). */ ret = _libssh2_pem_decode_integer(&data, &datalen, &n, &nlen); - if (ret != 0 || (nlen != 1 && *n != '\0')) { + if(ret != 0 || (nlen != 1 && *n != '\0')) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &n, &nlen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &e, &elen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &d, &dlen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &p, &plen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &q, &qlen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &e1, &e1len); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &e2, &e2len); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &coeff, &coefflen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } - if (_libssh2_rsa_new(rsa, e, elen, n, nlen, d, dlen, p, plen, + if(_libssh2_rsa_new(rsa, e, elen, n, nlen, d, dlen, p, plen, q, qlen, e1, e1len, 
e2, e2len, coeff, coefflen)) { ret = -1; goto fail; @@ -285,72 +286,71 @@ _libssh2_dsa_new_private(libssh2_dsa_ctx ** dsa, unsigned char *p, *q, *g, *y, *x; unsigned int plen, qlen, glen, ylen, xlen; - (void) passphrase; - - fp = fopen(filename, "r"); - if (!fp) { + fp = fopen(filename, FOPEN_READTEXT); + if(!fp) { return -1; } ret = _libssh2_pem_parse(session, "-----BEGIN DSA PRIVATE KEY-----", "-----END DSA PRIVATE KEY-----", + passphrase, fp, &data, &datalen); fclose(fp); - if (ret) { + if(ret) { return -1; } save_data = data; - if (_libssh2_pem_decode_sequence(&data, &datalen)) { + if(_libssh2_pem_decode_sequence(&data, &datalen)) { ret = -1; goto fail; } /* First read Version field (should be 0). */ ret = _libssh2_pem_decode_integer(&data, &datalen, &p, &plen); - if (ret != 0 || (plen != 1 && *p != '\0')) { + if(ret != 0 || (plen != 1 && *p != '\0')) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &p, &plen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &q, &qlen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &g, &glen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &y, &ylen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } ret = _libssh2_pem_decode_integer(&data, &datalen, &x, &xlen); - if (ret != 0) { + if(ret != 0) { ret = -1; goto fail; } - if (datalen != 0) { + if(datalen != 0) { ret = -1; goto fail; } - if (_libssh2_dsa_new(dsa, p, plen, q, qlen, g, glen, y, ylen, x, xlen)) { + if(_libssh2_dsa_new(dsa, p, plen, q, qlen, g, glen, y, ylen, x, xlen)) { ret = -1; goto fail; } @@ -375,11 +375,11 @@ _libssh2_rsa_sha1_sign(LIBSSH2_SESSION * session, const char *tmp; size_t size; - if (hash_len != SHA_DIGEST_LENGTH) { + if(hash_len != SHA_DIGEST_LENGTH) { return -1; } - if (gcry_sexp_build(&data, NULL, + if(gcry_sexp_build(&data, NULL, 
"(data (flags pkcs1) (hash sha1 %b))", hash_len, hash)) { return -1; @@ -389,32 +389,36 @@ _libssh2_rsa_sha1_sign(LIBSSH2_SESSION * session, gcry_sexp_release(data); - if (rc != 0) { + if(rc != 0) { return -1; } data = gcry_sexp_find_token(sig_sexp, "s", 0); - if (!data) { + if(!data) { return -1; } tmp = gcry_sexp_nth_data(data, 1, &size); - if (!tmp) { + if(!tmp) { + gcry_sexp_release(data); return -1; } - if (tmp[0] == '\0') { + if(tmp[0] == '\0') { tmp++; size--; } *signature = LIBSSH2_ALLOC(session, size); - if (!*signature) { + if(!*signature) { + gcry_sexp_release(data); return -1; } memcpy(*signature, tmp, size); *signature_len = size; + gcry_sexp_release(data); + return rc; } @@ -430,14 +434,15 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, const char *tmp; size_t size; - if (hash_len != SHA_DIGEST_LENGTH) { + if(hash_len != SHA_DIGEST_LENGTH) { return -1; } memcpy(zhash + 1, hash, hash_len); zhash[0] = 0; - if (gcry_sexp_build(&data, NULL, "(data (value %b))", hash_len + 1, zhash)) { + if(gcry_sexp_build(&data, NULL, "(data (value %b))", + hash_len + 1, zhash)) { return -1; } @@ -445,7 +450,7 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, gcry_sexp_release(data); - if (ret != 0) { + if(ret != 0) { return -1; } @@ -454,19 +459,19 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, /* Extract R. */ data = gcry_sexp_find_token(sig_sexp, "r", 0); - if (!data) + if(!data) goto err; tmp = gcry_sexp_nth_data(data, 1, &size); - if (!tmp) + if(!tmp) goto err; - if (tmp[0] == '\0') { + if(tmp[0] == '\0') { tmp++; size--; } - if (size < 1 || size > 20) + if(size < 1 || size > 20) goto err; memcpy(sig + (20 - size), tmp, size); @@ -476,19 +481,19 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, /* Extract S. 
*/ data = gcry_sexp_find_token(sig_sexp, "s", 0); - if (!data) + if(!data) goto err; tmp = gcry_sexp_nth_data(data, 1, &size); - if (!tmp) + if(!tmp) goto err; - if (tmp[0] == '\0') { + if(tmp[0] == '\0') { tmp++; size--; } - if (size < 1 || size > 20) + if(size < 1 || size > 20) goto err; memcpy(sig + 20 + (20 - size), tmp, size); @@ -498,10 +503,10 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, ret = -1; out: - if (sig_sexp) { + if(sig_sexp) { gcry_sexp_release(sig_sexp); } - if (data) { + if(data) { gcry_sexp_release(data); } return ret; @@ -519,12 +524,12 @@ _libssh2_dsa_sha1_verify(libssh2_dsa_ctx * dsactx, libssh2_sha1(m, m_len, hash + 1); hash[0] = 0; - if (gcry_sexp_build(&s_hash, NULL, "(data(flags raw)(value %b))", + if(gcry_sexp_build(&s_hash, NULL, "(data(flags raw)(value %b))", SHA_DIGEST_LENGTH + 1, hash)) { return -1; } - if (gcry_sexp_build(&s_sig, NULL, "(sig-val(dsa(r %b)(s %b)))", + if(gcry_sexp_build(&s_sig, NULL, "(sig-val(dsa(r %b)(s %b)))", 20, sig, 20, sig + 20)) { gcry_sexp_release(s_hash); return -1; @@ -543,30 +548,30 @@ _libssh2_cipher_init(_libssh2_cipher_ctx * h, unsigned char *iv, unsigned char *secret, int encrypt) { int ret; - int cipher = _libssh2_gcry_cipher (algo); - int mode = _libssh2_gcry_mode (algo); + int cipher = _libssh2_gcry_cipher(algo); + int mode = _libssh2_gcry_mode(algo); int keylen = gcry_cipher_get_algo_keylen(cipher); (void) encrypt; ret = gcry_cipher_open(h, cipher, mode, 0); - if (ret) { + if(ret) { return -1; } ret = gcry_cipher_setkey(*h, secret, keylen); - if (ret) { + if(ret) { gcry_cipher_close(*h); return -1; } - if (mode != GCRY_CIPHER_MODE_STREAM) { + if(mode != GCRY_CIPHER_MODE_STREAM) { int blklen = gcry_cipher_get_algo_blklen(cipher); - if (mode == GCRY_CIPHER_MODE_CTR) + if(mode == GCRY_CIPHER_MODE_CTR) ret = gcry_cipher_setctr(*h, iv, blklen); else ret = gcry_cipher_setiv(*h, iv, blklen); - if (ret) { + if(ret) { gcry_cipher_close(*h); return -1; } @@ -580,12 +585,13 @@ 
_libssh2_cipher_crypt(_libssh2_cipher_ctx * ctx, _libssh2_cipher_type(algo), int encrypt, unsigned char *block, size_t blklen) { - int cipher = _libssh2_gcry_cipher (algo); + int cipher = _libssh2_gcry_cipher(algo); int ret; - if (encrypt) { + if(encrypt) { ret = gcry_cipher_encrypt(*ctx, block, blklen, block, blklen); - } else { + } + else { ret = gcry_cipher_decrypt(*ctx, block, blklen, block, blklen); } return ret; @@ -602,8 +608,9 @@ _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, const char *passphrase) { return _libssh2_error(session, LIBSSH2_ERROR_METHOD_NOT_SUPPORTED, - "Unable to extract public key from private key in memory: " - "Method unimplemented in libgcrypt backend"); + "Unable to extract public key from private " + "key in memory: " + "Method unimplemented in libgcrypt backend"); } int @@ -624,4 +631,55 @@ void _libssh2_init_aes_ctr(void) { /* no implementation */ } + +void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx) +{ + *dhctx = gcry_mpi_new(0); /* Random from client */ +} + +int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order) +{ + /* Generate x and e */ + gcry_mpi_randomize(*dhctx, group_order * 8 - 1, GCRY_WEAK_RANDOM); + gcry_mpi_powm(public, g, *dhctx, p); + return 0; +} + +int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p) +{ + /* Compute the shared secret */ + gcry_mpi_powm(secret, f, *dhctx, p); + return 0; +} + +void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx) +{ + gcry_mpi_release(*dhctx); + *dhctx = NULL; +} + +/* _libssh2_supported_key_sign_algorithms + * + * Return supported key hash algo upgrades, see crypto.h + * + */ + +const char * +_libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session, + unsigned char *key_method, + size_t key_method_len) +{ + (void)session; + (void)key_method; + (void)key_method_len; + + return NULL; +} + #endif /* LIBSSH2_LIBGCRYPT */ diff --git 
a/vendor/libssh2/src/libgcrypt.h b/vendor/libssh2/src/libgcrypt.h index 11d6ad2dc5..95876b96d1 100644 --- a/vendor/libssh2/src/libgcrypt.h +++ b/vendor/libssh2/src/libgcrypt.h @@ -1,3 +1,5 @@ +#ifndef __LIBSSH2_LIBGCRYPT_H +#define __LIBSSH2_LIBGCRYPT_H /* * Copyright (C) 2008, 2009, 2010 Simon Josefsson * Copyright (C) 2006, 2007, The Written Word, Inc. @@ -53,14 +55,21 @@ #define LIBSSH2_3DES 1 #define LIBSSH2_RSA 1 +#define LIBSSH2_RSA_SHA2 0 #define LIBSSH2_DSA 1 +#define LIBSSH2_ECDSA 0 +#define LIBSSH2_ED25519 0 #define MD5_DIGEST_LENGTH 16 #define SHA_DIGEST_LENGTH 20 #define SHA256_DIGEST_LENGTH 32 +#define SHA384_DIGEST_LENGTH 48 +#define SHA512_DIGEST_LENGTH 64 + +#define EC_MAX_POINT_LEN ((528 * 2 / 8) + 1) #define _libssh2_random(buf, len) \ - (gcry_randomize ((buf), (len), GCRY_STRONG_RANDOM), 1) + (gcry_randomize ((buf), (len), GCRY_STRONG_RANDOM), 0) #define libssh2_prepare_iovec(vec, len) /* Empty. */ @@ -68,60 +77,82 @@ /* returns 0 in case of failure */ #define libssh2_sha1_init(ctx) \ - (GPG_ERR_NO_ERROR == gcry_md_open (ctx, GCRY_MD_SHA1, 0)) + (GPG_ERR_NO_ERROR == gcry_md_open(ctx, GCRY_MD_SHA1, 0)) #define libssh2_sha1_update(ctx, data, len) \ - gcry_md_write (ctx, (unsigned char *) data, len) + gcry_md_write(ctx, (unsigned char *) data, len) #define libssh2_sha1_final(ctx, out) \ - memcpy (out, gcry_md_read (ctx, 0), SHA_DIGEST_LENGTH), gcry_md_close (ctx) + memcpy(out, gcry_md_read(ctx, 0), SHA_DIGEST_LENGTH), gcry_md_close(ctx) #define libssh2_sha1(message, len, out) \ - gcry_md_hash_buffer (GCRY_MD_SHA1, out, message, len) + gcry_md_hash_buffer(GCRY_MD_SHA1, out, message, len) #define libssh2_sha256_ctx gcry_md_hd_t #define libssh2_sha256_init(ctx) \ - (GPG_ERR_NO_ERROR == gcry_md_open (ctx, GCRY_MD_SHA256, 0)) + (GPG_ERR_NO_ERROR == gcry_md_open(ctx, GCRY_MD_SHA256, 0)) #define libssh2_sha256_update(ctx, data, len) \ - gcry_md_write (ctx, (unsigned char *) data, len) + gcry_md_write(ctx, (unsigned char *) data, len) #define 
libssh2_sha256_final(ctx, out) \ - memcpy (out, gcry_md_read (ctx, 0), SHA256_DIGEST_LENGTH), gcry_md_close (ctx) + memcpy(out, gcry_md_read(ctx, 0), SHA256_DIGEST_LENGTH), gcry_md_close(ctx) #define libssh2_sha256(message, len, out) \ - gcry_md_hash_buffer (GCRY_MD_SHA256, out, message, len) + gcry_md_hash_buffer(GCRY_MD_SHA256, out, message, len) + +#define libssh2_sha384_ctx gcry_md_hd_t + +#define libssh2_sha384_init(ctx) \ + (GPG_ERR_NO_ERROR == gcry_md_open(ctx, GCRY_MD_SHA384, 0)) +#define libssh2_sha384_update(ctx, data, len) \ + gcry_md_write(ctx, (unsigned char *) data, len) +#define libssh2_sha384_final(ctx, out) \ + memcpy(out, gcry_md_read(ctx, 0), SHA384_DIGEST_LENGTH), gcry_md_close(ctx) +#define libssh2_sha384(message, len, out) \ + gcry_md_hash_buffer(GCRY_MD_SHA384, out, message, len) + +#define libssh2_sha512_ctx gcry_md_hd_t + +#define libssh2_sha512_init(ctx) \ + (GPG_ERR_NO_ERROR == gcry_md_open(ctx, GCRY_MD_SHA512, 0)) +#define libssh2_sha512_update(ctx, data, len) \ + gcry_md_write(ctx, (unsigned char *) data, len) +#define libssh2_sha512_final(ctx, out) \ + memcpy(out, gcry_md_read(ctx, 0), SHA512_DIGEST_LENGTH), gcry_md_close(ctx) +#define libssh2_sha512(message, len, out) \ + gcry_md_hash_buffer(GCRY_MD_SHA512, out, message, len) #define libssh2_md5_ctx gcry_md_hd_t /* returns 0 in case of failure */ #define libssh2_md5_init(ctx) \ - (GPG_ERR_NO_ERROR == gcry_md_open (ctx, GCRY_MD_MD5, 0)) + (GPG_ERR_NO_ERROR == gcry_md_open(ctx, GCRY_MD_MD5, 0)) #define libssh2_md5_update(ctx, data, len) \ - gcry_md_write (ctx, (unsigned char *) data, len) + gcry_md_write(ctx, (unsigned char *) data, len) #define libssh2_md5_final(ctx, out) \ - memcpy (out, gcry_md_read (ctx, 0), MD5_DIGEST_LENGTH), gcry_md_close (ctx) + memcpy(out, gcry_md_read(ctx, 0), MD5_DIGEST_LENGTH), gcry_md_close(ctx) #define libssh2_md5(message, len, out) \ - gcry_md_hash_buffer (GCRY_MD_MD5, out, message, len) + gcry_md_hash_buffer(GCRY_MD_MD5, out, message, len) #define 
libssh2_hmac_ctx gcry_md_hd_t #define libssh2_hmac_ctx_init(ctx) #define libssh2_hmac_sha1_init(ctx, key, keylen) \ - gcry_md_open (ctx, GCRY_MD_SHA1, GCRY_MD_FLAG_HMAC), \ - gcry_md_setkey (*ctx, key, keylen) + gcry_md_open(ctx, GCRY_MD_SHA1, GCRY_MD_FLAG_HMAC), \ + gcry_md_setkey(*ctx, key, keylen) #define libssh2_hmac_md5_init(ctx, key, keylen) \ - gcry_md_open (ctx, GCRY_MD_MD5, GCRY_MD_FLAG_HMAC), \ - gcry_md_setkey (*ctx, key, keylen) + gcry_md_open(ctx, GCRY_MD_MD5, GCRY_MD_FLAG_HMAC), \ + gcry_md_setkey(*ctx, key, keylen) #define libssh2_hmac_ripemd160_init(ctx, key, keylen) \ - gcry_md_open (ctx, GCRY_MD_RMD160, GCRY_MD_FLAG_HMAC), \ - gcry_md_setkey (*ctx, key, keylen) + gcry_md_open(ctx, GCRY_MD_RMD160, GCRY_MD_FLAG_HMAC), \ + gcry_md_setkey(*ctx, key, keylen) #define libssh2_hmac_sha256_init(ctx, key, keylen) \ - gcry_md_open (ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC), \ - gcry_md_setkey (*ctx, key, keylen) + gcry_md_open(ctx, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC), \ + gcry_md_setkey(*ctx, key, keylen) #define libssh2_hmac_sha512_init(ctx, key, keylen) \ - gcry_md_open (ctx, GCRY_MD_SHA512, GCRY_MD_FLAG_HMAC), \ - gcry_md_setkey (*ctx, key, keylen) + gcry_md_open(ctx, GCRY_MD_SHA512, GCRY_MD_FLAG_HMAC), \ + gcry_md_setkey(*ctx, key, keylen) #define libssh2_hmac_update(ctx, data, datalen) \ - gcry_md_write (ctx, (unsigned char *) data, datalen) + gcry_md_write(ctx, (unsigned char *) data, datalen) #define libssh2_hmac_final(ctx, data) \ - memcpy (data, gcry_md_read (ctx, 0), \ - gcry_md_get_algo_dlen (gcry_md_get_algo (ctx))) + memcpy(data, gcry_md_read(ctx, 0), \ + gcry_md_get_algo_dlen(gcry_md_get_algo(ctx))) #define libssh2_hmac_cleanup(ctx) gcry_md_close (*ctx); #define libssh2_crypto_init() gcry_control (GCRYCTL_DISABLE_SECMEM) @@ -135,6 +166,11 @@ #define _libssh2_dsa_free(dsactx) gcry_sexp_release (dsactx) +#if LIBSSH2_ECDSA +#else +#define _libssh2_ec_key void +#endif + #define _libssh2_cipher_type(name) int name #define _libssh2_cipher_ctx 
gcry_cipher_hd_t @@ -171,13 +207,32 @@ #define _libssh2_bn_ctx_new() 0 #define _libssh2_bn_ctx_free(bnctx) ((void)0) #define _libssh2_bn_init() gcry_mpi_new(0) -#define _libssh2_bn_init_from_bin() NULL /* because gcry_mpi_scan() creates a new bignum */ -#define _libssh2_bn_rand(bn, bits, top, bottom) gcry_mpi_randomize (bn, bits, GCRY_WEAK_RANDOM) -#define _libssh2_bn_mod_exp(r, a, p, m, ctx) gcry_mpi_powm (r, a, p, m) +#define _libssh2_bn_init_from_bin() NULL /* because gcry_mpi_scan() creates a + new bignum */ #define _libssh2_bn_set_word(bn, val) gcry_mpi_set_ui(bn, val) -#define _libssh2_bn_from_bin(bn, len, val) gcry_mpi_scan(&((bn)), GCRYMPI_FMT_USG, val, len, NULL) -#define _libssh2_bn_to_bin(bn, val) gcry_mpi_print (GCRYMPI_FMT_USG, val, _libssh2_bn_bytes(bn), NULL, bn) -#define _libssh2_bn_bytes(bn) (gcry_mpi_get_nbits (bn) / 8 + ((gcry_mpi_get_nbits (bn) % 8 == 0) ? 0 : 1)) +#define _libssh2_bn_from_bin(bn, len, val) \ + gcry_mpi_scan(&((bn)), GCRYMPI_FMT_USG, val, len, NULL) +#define _libssh2_bn_to_bin(bn, val) \ + gcry_mpi_print(GCRYMPI_FMT_USG, val, _libssh2_bn_bytes(bn), NULL, bn) +#define _libssh2_bn_bytes(bn) \ + (gcry_mpi_get_nbits (bn) / 8 + \ + ((gcry_mpi_get_nbits (bn) % 8 == 0) ? 
0 : 1)) #define _libssh2_bn_bits(bn) gcry_mpi_get_nbits (bn) #define _libssh2_bn_free(bn) gcry_mpi_release(bn) +#define _libssh2_dh_ctx struct gcry_mpi * +#define libssh2_dh_init(dhctx) _libssh2_dh_init(dhctx) +#define libssh2_dh_key_pair(dhctx, public, g, p, group_order, bnctx) \ + _libssh2_dh_key_pair(dhctx, public, g, p, group_order) +#define libssh2_dh_secret(dhctx, secret, f, p, bnctx) \ + _libssh2_dh_secret(dhctx, secret, f, p) +#define libssh2_dh_dtor(dhctx) _libssh2_dh_dtor(dhctx) +extern void _libssh2_dh_init(_libssh2_dh_ctx *dhctx); +extern int _libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, + int group_order); +extern int _libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p); +extern void _libssh2_dh_dtor(_libssh2_dh_ctx *dhctx); + +#endif /* __LIBSSH2_LIBGCRYPT_H */ diff --git a/vendor/libssh2/src/libssh2_config.h.in b/vendor/libssh2/src/libssh2_config.h.in index af4ab9ca0a..94adf42832 100644 --- a/vendor/libssh2/src/libssh2_config.h.in +++ b/vendor/libssh2/src/libssh2_config.h.in @@ -3,19 +3,13 @@ /* Define if building universal (internal helper macro) */ #undef AC_APPLE_UNIVERSAL_BUILD -/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP - systems. This function is required for `alloca.c' support on those systems. - */ -#undef CRAY_STACKSEG_END - -/* Define to 1 if using `alloca.c'. */ +/* Define to 1 if using 'alloca.c'. */ #undef C_ALLOCA -/* Define to 1 if you have `alloca', as a function or macro. */ +/* Define to 1 if you have 'alloca', as a function or macro. */ #undef HAVE_ALLOCA -/* Define to 1 if you have and it should be used (not on Ultrix). - */ +/* Define to 1 if works. */ #undef HAVE_ALLOCA_H /* Define to 1 if you have the header file. */ @@ -64,8 +58,8 @@ /* Define if you have the gcrypt library. */ #undef HAVE_LIBGCRYPT -/* Define if you have the mbedtls library. 
*/ -#undef HAVE_LIBMBEDTLS +/* Define if you have the mbedcrypto library. */ +#undef HAVE_LIBMBEDCRYPTO /* Define if you have the ssl library. */ #undef HAVE_LIBSSL @@ -76,8 +70,8 @@ /* Define to 1 if the compiler supports the 'long long' data type. */ #undef HAVE_LONGLONG -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H +/* Define to 1 if you have the `memset_s' function. */ +#undef HAVE_MEMSET_S /* Define to 1 if you have the header file. */ #undef HAVE_NETINET_IN_H @@ -178,10 +172,10 @@ /* Use mbedtls */ #undef LIBSSH2_MBEDTLS -/* Use OpenSSL */ +/* Use openssl */ #undef LIBSSH2_OPENSSL -/* Use Windows CNG */ +/* Use wincng */ #undef LIBSSH2_WINCNG /* Define to the sub-directory where libtool stores uninstalled libraries. */ @@ -219,7 +213,9 @@ STACK_DIRECTION = 0 => direction of growth unknown */ #undef STACK_DIRECTION -/* Define to 1 if you have the ANSI C header files. */ +/* Define to 1 if all of the C90 standard headers exist (not just the ones + required in a freestanding environment). This macro is provided for + backward compatibility; new code need not use it. */ #undef STDC_HEADERS /* Version number of package */ @@ -237,11 +233,6 @@ # endif #endif -/* Enable large inode numbers on Mac OS X 10.5. */ -#ifndef _DARWIN_USE_64_BIT_INODE -# define _DARWIN_USE_64_BIT_INODE 1 -#endif - /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS diff --git a/vendor/libssh2/src/libssh2_priv.h b/vendor/libssh2/src/libssh2_priv.h index b4296a221a..be16ad2e31 100644 --- a/vendor/libssh2/src/libssh2_priv.h +++ b/vendor/libssh2/src/libssh2_priv.h @@ -1,3 +1,5 @@ +#ifndef __LIBSSH2_PRIV_H +#define __LIBSSH2_PRIV_H /* Copyright (c) 2004-2008, 2010, Sara Golemon * Copyright (c) 2009-2014 by Daniel Stenberg * Copyright (c) 2010 Simon Josefsson @@ -37,9 +39,6 @@ * OF SUCH DAMAGE. 
*/ -#ifndef LIBSSH2_PRIV_H -#define LIBSSH2_PRIV_H 1 - #define LIBSSH2_LIBRARY #include "libssh2_config.h" @@ -58,18 +57,15 @@ #include #include -/* The following CPP block should really only be in session.c and - packet.c. However, AIX have #define's for 'events' and 'revents' - and we are using those names in libssh2.h, so we need to include - the AIX headers first, to make sure all code is compiled with - consistent names of these fields. While arguable the best would to - change libssh2.h to use other names, that would break backwards - compatibility. For more information, see: - https://www.mail-archive.com/libssh2-devel%40lists.sourceforge.net/msg00003.html - https://www.mail-archive.com/libssh2-devel%40lists.sourceforge.net/msg00224.html +/* The following CPP block should really only be in session.c and packet.c. + However, AIX have #define's for 'events' and 'revents' and we are using + those names in libssh2.h, so we need to include the AIX headers first, to + make sure all code is compiled with consistent names of these fields. + While arguable the best would to change libssh2.h to use other names, that + would break backwards compatibility. */ #ifdef HAVE_POLL -# include +# include #else # if defined(HAVE_SELECT) && !defined(WIN32) # ifdef HAVE_SYS_SELECT_H @@ -113,18 +109,23 @@ #define inline __inline #endif -/* Provide iovec / writev on WIN32 platform. */ -#ifdef WIN32 +/* 3DS doesn't seem to have iovec */ +#if defined(WIN32) || defined(_3DS) struct iovec { size_t iov_len; - void * iov_base; + void *iov_base; }; +#endif + +/* Provide iovec / writev on WIN32 platform. 
*/ +#ifdef WIN32 + static inline int writev(int sock, struct iovec *iov, int nvecs) { DWORD ret; - if (WSASend(sock, (LPWSABUF)iov, nvecs, &ret, 0, NULL, NULL) == 0) { + if(WSASend(sock, (LPWSABUF)iov, nvecs, &ret, 0, NULL, NULL) == 0) { return ret; } return -1; @@ -146,6 +147,18 @@ static inline int writev(int sock, struct iovec *iov, int nvecs) #endif +#ifndef SIZE_MAX +#if _WIN64 +#define SIZE_MAX 0xFFFFFFFFFFFFFFFF +#else +#define SIZE_MAX 0xFFFFFFFF +#endif +#endif + +#ifndef UINT_MAX +#define UINT_MAX 0xFFFFFFFF +#endif + /* RFC4253 section 6.1 Maximum Packet Length says: * * "All implementations MUST be able to process packets with @@ -154,7 +167,7 @@ static inline int writev(int sock, struct iovec *iov, int nvecs) * padding length, payload, padding, and MAC.)." */ #define MAX_SSH_PACKET_LEN 35000 -#define MAX_SHA_DIGEST_LEN SHA256_DIGEST_LENGTH +#define MAX_SHA_DIGEST_LEN SHA512_DIGEST_LENGTH #define LIBSSH2_ALLOC(session, count) \ session->alloc((count), &(session)->abstract) @@ -248,11 +261,10 @@ typedef struct kmdhgGPshakex_state_t size_t s_packet_len; size_t tmp_len; _libssh2_bn_ctx *ctx; - _libssh2_bn *x; + _libssh2_dh_ctx x; _libssh2_bn *e; _libssh2_bn *f; _libssh2_bn *k; - unsigned char *s; unsigned char *f_value; unsigned char *k_value; unsigned char *h_sig; @@ -271,10 +283,18 @@ typedef struct key_exchange_state_low_t kmdhgGPshakex_state_t exchange_state; _libssh2_bn *p; /* SSH2 defined value (p_value) */ _libssh2_bn *g; /* SSH2 defined value (2) */ - unsigned char request[13]; + unsigned char request[256]; /* Must fit EC_MAX_POINT_LEN + data */ unsigned char *data; size_t request_len; size_t data_len; + _libssh2_ec_key *private_key; /* SSH2 ecdh private key */ + unsigned char *public_key_oct; /* SSH2 ecdh public key octal value */ + size_t public_key_oct_len; /* SSH2 ecdh public key octal value + length */ + unsigned char *curve25519_public_key; /* curve25519 public key, 32 + bytes */ + unsigned char *curve25519_private_key; /* curve25519 private 
key, 32 + bytes */ } key_exchange_state_low_t; typedef struct key_exchange_state_t @@ -406,7 +426,8 @@ struct _LIBSSH2_CHANNEL /* State variables used in libssh2_channel_receive_window_adjust() */ libssh2_nonblocking_states adjust_state; - unsigned char adjust_adjust[9]; /* packet_type(1) + channel(4) + adjustment(4) */ + unsigned char adjust_adjust[9]; /* packet_type(1) + channel(4) + + adjustment(4) */ /* State variables used in libssh2_channel_read_ex() */ libssh2_nonblocking_states read_state; @@ -435,6 +456,13 @@ struct _LIBSSH2_CHANNEL /* State variables used in libssh2_channel_handle_extended_data2() */ libssh2_nonblocking_states extData2_state; + /* State variables used in libssh2_channel_request_auth_agent() */ + libssh2_nonblocking_states req_auth_agent_try_state; + libssh2_nonblocking_states req_auth_agent_state; + unsigned char req_auth_agent_packet[36]; + size_t req_auth_agent_packet_len; + unsigned char req_auth_agent_local_channel[4]; + packet_requirev_state_t req_auth_agent_requirev_state; }; struct _LIBSSH2_LISTENER @@ -609,6 +637,15 @@ struct _LIBSSH2_SESSION unsigned char server_hostkey_sha1[SHA_DIGEST_LENGTH]; int server_hostkey_sha1_valid; + unsigned char server_hostkey_sha256[SHA256_DIGEST_LENGTH]; + int server_hostkey_sha256_valid; + + /* public key algorithms accepted as comma separated list */ + char *server_sign_algorithms; + + /* key signing algorithm preferences -- NULL yields server order */ + char *sign_algo_prefs; + /* (remote as source of data -- packet_read ) */ libssh2_endpoint_data remote; @@ -642,8 +679,9 @@ struct _LIBSSH2_SESSION struct transportpacket packet; #ifdef LIBSSH2DEBUG int showmask; /* what debug/trace messages to display */ - libssh2_trace_handler_func tracehandler; /* callback to display trace messages */ - void* tracehandler_context; /* context for the trace handler */ + libssh2_trace_handler_func tracehandler; /* callback to display trace + messages */ + void *tracehandler_context; /* context for the trace 
handler */ #endif /* State variables used in libssh2_banner_send() */ @@ -681,6 +719,7 @@ struct _LIBSSH2_SESSION libssh2_nonblocking_states userauth_list_state; unsigned char *userauth_list_data; size_t userauth_list_data_len; + char *userauth_banner; packet_requirev_state_t userauth_list_packet_requirev_state; /* State variables used in libssh2_userauth_password_ex() */ @@ -850,7 +889,8 @@ struct _LIBSSH2_KEX_METHOD { const char *name; - /* Key exchange, populates session->* and returns 0 on success, non-0 on error */ + /* Key exchange, populates session->* and returns 0 on success, non-0 on + error */ int (*exchange_keys) (LIBSSH2_SESSION * session, key_exchange_state_low_t * key_state); @@ -867,8 +907,10 @@ struct _LIBSSH2_HOSTKEY_METHOD int (*initPEM) (LIBSSH2_SESSION * session, const char *privkeyfile, unsigned const char *passphrase, void **abstract); int (*initPEMFromMemory) (LIBSSH2_SESSION * session, - const char *privkeyfiledata, size_t privkeyfiledata_len, - unsigned const char *passphrase, void **abstract); + const char *privkeyfiledata, + size_t privkeyfiledata_len, + unsigned const char *passphrase, + void **abstract); int (*sig_verify) (LIBSSH2_SESSION * session, const unsigned char *sig, size_t sig_len, const unsigned char *m, size_t m_len, void **abstract); @@ -884,6 +926,7 @@ struct _LIBSSH2_HOSTKEY_METHOD struct _LIBSSH2_CRYPT_METHOD { const char *name; + const char *pem_annotation; int blocksize; @@ -930,7 +973,8 @@ struct _LIBSSH2_COMP_METHOD void _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...); #else -#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || defined(__GNUC__) +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + defined(__GNUC__) /* C99 supported and also by older GCC */ #define _libssh2_debug(x,y,z,...) do {} while (0) #else @@ -951,7 +995,8 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) 
/* Initial packet state, prior to MAC check */ #define LIBSSH2_MAC_UNCONFIRMED 1 -/* When MAC type is "none" (proto initiation phase) all packets are deemed "confirmed" */ +/* When MAC type is "none" (proto initiation phase) all packets are deemed + "confirmed" */ #define LIBSSH2_MAC_CONFIRMED 0 /* Something very bad is going on */ #define LIBSSH2_MAC_INVALID -1 @@ -968,6 +1013,7 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) #define SSH_MSG_DEBUG 4 #define SSH_MSG_SERVICE_REQUEST 5 #define SSH_MSG_SERVICE_ACCEPT 6 +#define SSH_MSG_EXT_INFO 7 #define SSH_MSG_KEXINIT 20 #define SSH_MSG_NEWKEYS 21 @@ -976,13 +1022,18 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) #define SSH_MSG_KEXDH_INIT 30 #define SSH_MSG_KEXDH_REPLY 31 -/* diffie-hellman-group-exchange-sha1 and diffie-hellman-group-exchange-sha256 */ +/* diffie-hellman-group-exchange-sha1 and + diffie-hellman-group-exchange-sha256 */ #define SSH_MSG_KEX_DH_GEX_REQUEST_OLD 30 #define SSH_MSG_KEX_DH_GEX_REQUEST 34 #define SSH_MSG_KEX_DH_GEX_GROUP 31 #define SSH_MSG_KEX_DH_GEX_INIT 32 #define SSH_MSG_KEX_DH_GEX_REPLY 33 +/* ecdh */ +#define SSH2_MSG_KEX_ECDH_INIT 30 +#define SSH2_MSG_KEX_ECDH_REPLY 31 + /* User Authentication */ #define SSH_MSG_USERAUTH_REQUEST 50 #define SSH_MSG_USERAUTH_FAILURE 51 @@ -1037,31 +1088,75 @@ int _libssh2_kex_exchange(LIBSSH2_SESSION * session, int reexchange, const LIBSSH2_CRYPT_METHOD **libssh2_crypt_methods(void); const LIBSSH2_HOSTKEY_METHOD **libssh2_hostkey_methods(void); +/* misc.c */ +int _libssh2_bcrypt_pbkdf(const char *pass, + size_t passlen, + const uint8_t *salt, + size_t saltlen, + uint8_t *key, + size_t keylen, + unsigned int rounds); + /* pem.c */ int _libssh2_pem_parse(LIBSSH2_SESSION * session, const char *headerbegin, const char *headerend, + const unsigned char *passphrase, FILE * fp, unsigned char **data, unsigned int *datalen); int _libssh2_pem_parse_memory(LIBSSH2_SESSION * session, const 
char *headerbegin, const char *headerend, const char *filedata, size_t filedata_len, unsigned char **data, unsigned int *datalen); + /* OpenSSL keys */ +int +_libssh2_openssh_pem_parse(LIBSSH2_SESSION * session, + const unsigned char *passphrase, + FILE * fp, struct string_buf **decrypted_buf); +int +_libssh2_openssh_pem_parse_memory(LIBSSH2_SESSION * session, + const unsigned char *passphrase, + const char *filedata, size_t filedata_len, + struct string_buf **decrypted_buf); + int _libssh2_pem_decode_sequence(unsigned char **data, unsigned int *datalen); int _libssh2_pem_decode_integer(unsigned char **data, unsigned int *datalen, unsigned char **i, unsigned int *ilen); /* global.c */ -void _libssh2_init_if_needed (void); +void _libssh2_init_if_needed(void); #define ARRAY_SIZE(a) (sizeof ((a)) / sizeof ((a)[0])) /* define to output the libssh2_int64_t type in a *printf() */ -#if defined( __BORLANDC__ ) || defined( _MSC_VER ) || defined( __MINGW32__ ) +#if defined(__BORLANDC__) || defined(_MSC_VER) || defined(__MINGW32__) #define LIBSSH2_INT64_T_FORMAT "I64d" #else #define LIBSSH2_INT64_T_FORMAT "lld" #endif -#endif /* LIBSSH2_H */ +/* In Windows the default file mode is text but an application can override it. +Therefore we specify it explicitly. https://github.com/curl/curl/pull/258 +*/ +#if defined(WIN32) || defined(MSDOS) +#define FOPEN_READTEXT "rt" +#define FOPEN_WRITETEXT "wt" +#define FOPEN_APPENDTEXT "at" +#elif defined(__CYGWIN__) +/* Cygwin has specific behavior we need to address when WIN32 is not defined. +https://cygwin.com/cygwin-ug-net/using-textbinary.html +For write we want our output to have line endings of LF and be compatible with +other Cygwin utilities. For read we want to handle input that may have line +endings either CRLF or LF so 't' is appropriate. 
+*/ +#define FOPEN_READTEXT "rt" +#define FOPEN_WRITETEXT "w" +#define FOPEN_APPENDTEXT "a" +#else +#define FOPEN_READTEXT "r" +#define FOPEN_WRITETEXT "w" +#define FOPEN_APPENDTEXT "a" +#endif + +#endif /* __LIBSSH2_PRIV_H */ diff --git a/vendor/libssh2/src/mac.c b/vendor/libssh2/src/mac.c index 5ec26eb3b4..5ac71df4ce 100644 --- a/vendor/libssh2/src/mac.c +++ b/vendor/libssh2/src/mac.c @@ -86,7 +86,7 @@ mac_method_common_init(LIBSSH2_SESSION * session, unsigned char *key, static int mac_method_common_dtor(LIBSSH2_SESSION * session, void **abstract) { - if (*abstract) { + if(*abstract) { LIBSSH2_FREE(session, *abstract); } *abstract = NULL; @@ -118,7 +118,7 @@ mac_method_hmac_sha2_512_hash(LIBSSH2_SESSION * session, libssh2_hmac_sha512_init(&ctx, *abstract, 64); libssh2_hmac_update(ctx, seqno_buf, 4); libssh2_hmac_update(ctx, packet, packet_len); - if (addtl && addtl_len) { + if(addtl && addtl_len) { libssh2_hmac_update(ctx, addtl, addtl_len); } libssh2_hmac_final(ctx, buf); @@ -163,7 +163,7 @@ mac_method_hmac_sha2_256_hash(LIBSSH2_SESSION * session, libssh2_hmac_sha256_init(&ctx, *abstract, 32); libssh2_hmac_update(ctx, seqno_buf, 4); libssh2_hmac_update(ctx, packet, packet_len); - if (addtl && addtl_len) { + if(addtl && addtl_len) { libssh2_hmac_update(ctx, addtl, addtl_len); } libssh2_hmac_final(ctx, buf); @@ -208,7 +208,7 @@ mac_method_hmac_sha1_hash(LIBSSH2_SESSION * session, libssh2_hmac_sha1_init(&ctx, *abstract, 20); libssh2_hmac_update(ctx, seqno_buf, 4); libssh2_hmac_update(ctx, packet, packet_len); - if (addtl && addtl_len) { + if(addtl && addtl_len) { libssh2_hmac_update(ctx, addtl, addtl_len); } libssh2_hmac_final(ctx, buf); @@ -281,7 +281,7 @@ mac_method_hmac_md5_hash(LIBSSH2_SESSION * session, unsigned char *buf, libssh2_hmac_md5_init(&ctx, *abstract, 16); libssh2_hmac_update(ctx, seqno_buf, 4); libssh2_hmac_update(ctx, packet, packet_len); - if (addtl && addtl_len) { + if(addtl && addtl_len) { libssh2_hmac_update(ctx, addtl, addtl_len); } 
libssh2_hmac_final(ctx, buf); @@ -354,7 +354,7 @@ mac_method_hmac_ripemd160_hash(LIBSSH2_SESSION * session, libssh2_hmac_ripemd160_init(&ctx, *abstract, 20); libssh2_hmac_update(ctx, seqno_buf, 4); libssh2_hmac_update(ctx, packet, packet_len); - if (addtl && addtl_len) { + if(addtl && addtl_len) { libssh2_hmac_update(ctx, addtl, addtl_len); } libssh2_hmac_final(ctx, buf); diff --git a/vendor/libssh2/src/mac.h b/vendor/libssh2/src/mac.h index 66d3e61011..46fce54248 100644 --- a/vendor/libssh2/src/mac.h +++ b/vendor/libssh2/src/mac.h @@ -1,6 +1,5 @@ #ifndef __LIBSSH2_MAC_H #define __LIBSSH2_MAC_H - /* Copyright (C) 2009-2010 by Daniel Stenberg * * Redistribution and use in source and binary forms, diff --git a/vendor/libssh2/src/mbedtls.c b/vendor/libssh2/src/mbedtls.c index 1d181e18ff..dc76ef59a9 100644 --- a/vendor/libssh2/src/mbedtls.c +++ b/vendor/libssh2/src/mbedtls.c @@ -1,7 +1,52 @@ +/* Copyright (c) 2016, Art + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * Neither the name of the copyright holder nor the names + * of any other contributors may be used to endorse or + * promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + #include "libssh2_priv.h" #ifdef LIBSSH2_MBEDTLS /* compile only if we build with mbedtls */ +/*******************************************************************/ +/* + * mbedTLS backend: Global context handles + */ + +static mbedtls_entropy_context _libssh2_mbedtls_entropy; +static mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; + /*******************************************************************/ /* * mbedTLS backend: Generic functions @@ -18,7 +63,7 @@ _libssh2_mbedtls_init(void) ret = mbedtls_ctr_drbg_seed(&_libssh2_mbedtls_ctr_drbg, mbedtls_entropy_func, &_libssh2_mbedtls_entropy, NULL, 0); - if (ret != 0) + if(ret != 0) mbedtls_ctr_drbg_free(&_libssh2_mbedtls_ctr_drbg); } @@ -44,12 +89,12 @@ _libssh2_mbedtls_safe_free(void *buf, int len) (void)len; #endif - if (!buf) + if(!buf) return; #ifdef LIBSSH2_CLEAR_MEMORY - if (len > 0) - memset(buf, 0, len); + if(len > 0) + _libssh2_explicit_zero(buf, len); #endif mbedtls_free(buf); @@ -65,7 +110,7 @@ _libssh2_mbedtls_cipher_init(_libssh2_cipher_ctx *ctx, const mbedtls_cipher_info_t *cipher_info; int ret, op; - if (!ctx) + if(!ctx) return -1; op = encrypt == 0 ? 
MBEDTLS_ENCRYPT : MBEDTLS_DECRYPT; @@ -99,11 +144,10 @@ _libssh2_mbedtls_cipher_crypt(_libssh2_cipher_ctx *ctx, (void) encrypt; (void) algo; - osize = blocklen+mbedtls_cipher_get_block_size(ctx); + osize = blocklen + mbedtls_cipher_get_block_size(ctx); output = (unsigned char *)mbedtls_calloc(osize, sizeof(char)); - if(output) - { + if(output) { ret = mbedtls_cipher_reset(ctx); if(!ret) @@ -112,7 +156,7 @@ _libssh2_mbedtls_cipher_crypt(_libssh2_cipher_ctx *ctx, if(!ret) ret = mbedtls_cipher_finish(ctx, output + olen, &finish_olen); - if (!ret) { + if(!ret) { olen += finish_olen; memcpy(block, output, olen); } @@ -148,8 +192,8 @@ _libssh2_mbedtls_hash_init(mbedtls_md_context_t *ctx, mbedtls_md_init(ctx); ret = mbedtls_md_setup(ctx, md_info, hmac); - if (!ret){ - if (hmac) + if(!ret) { + if(hmac) ret = mbedtls_md_hmac_starts(ctx, key, keylen); else ret = mbedtls_md_starts(ctx); @@ -196,50 +240,61 @@ _libssh2_mbedtls_bignum_init(void) _libssh2_bn *bignum; bignum = (_libssh2_bn *)mbedtls_calloc(1, sizeof(_libssh2_bn)); - if (bignum) { + if(bignum) { mbedtls_mpi_init(bignum); } return bignum; } -int +void +_libssh2_mbedtls_bignum_free(_libssh2_bn *bn) +{ + if(bn) { + mbedtls_mpi_free(bn); + mbedtls_free(bn); + } +} + +static int _libssh2_mbedtls_bignum_random(_libssh2_bn *bn, int bits, int top, int bottom) { size_t len; int err; int i; - if (!bn || bits <= 0) + if(!bn || bits <= 0) return -1; len = (bits + 7) >> 3; - err = mbedtls_mpi_fill_random(bn, len, mbedtls_ctr_drbg_random, &_libssh2_mbedtls_ctr_drbg); - if (err) + err = mbedtls_mpi_fill_random(bn, len, mbedtls_ctr_drbg_random, + &_libssh2_mbedtls_ctr_drbg); + if(err) return -1; - /* Zero unsued bits above the most significant bit*/ - for(i=len*8-1;bits<=i;--i) { + /* Zero unused bits above the most significant bit*/ + for(i = len*8 - 1; bits <= i; --i) { err = mbedtls_mpi_set_bit(bn, i, 0); - if (err) + if(err) return -1; } - /* If `top` is -1, the most significant bit of the random number can be zero. 
- If top is 0, the most significant bit of the random number is set to 1, - and if top is 1, the two most significant bits of the number will be set - to 1, so that the product of two such random numbers will always have 2*bits length. + /* If `top` is -1, the most significant bit of the random number can be + zero. If top is 0, the most significant bit of the random number is + set to 1, and if top is 1, the two most significant bits of the number + will be set to 1, so that the product of two such random numbers will + always have 2*bits length. */ - for(i=0;i<=top;++i) { + for(i = 0; i <= top; ++i) { err = mbedtls_mpi_set_bit(bn, bits-i-1, 1); - if (err) + if(err) return -1; } /* make odd by setting first bit in least significant byte */ - if (bottom) { + if(bottom) { err = mbedtls_mpi_set_bit(bn, 0, 1); - if (err) + if(err) return -1; } @@ -275,42 +330,40 @@ _libssh2_mbedtls_rsa_new(libssh2_rsa_ctx **rsa, libssh2_rsa_ctx *ctx; ctx = (libssh2_rsa_ctx *) mbedtls_calloc(1, sizeof(libssh2_rsa_ctx)); - if (ctx != NULL) { + if(ctx != NULL) { mbedtls_rsa_init(ctx, MBEDTLS_RSA_PKCS_V15, 0); } else return -1; - if( (ret = mbedtls_mpi_read_binary(&(ctx->E), edata, elen) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->N), ndata, nlen) ) != 0 ) - { + /* !checksrc! 
disable ASSIGNWITHINCONDITION 1 */ + if((ret = mbedtls_mpi_read_binary(&(ctx->E), edata, elen) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->N), ndata, nlen) ) != 0) { ret = -1; } - if (!ret) - { + if(!ret) { ctx->len = mbedtls_mpi_size(&(ctx->N)); } - if (!ret && ddata) - { - if( (ret = mbedtls_mpi_read_binary(&(ctx->D) , ddata, dlen) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->P) , pdata, plen) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->Q) , qdata, qlen) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->DP), e1data, e1len) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->DQ), e2data, e2len) ) != 0 || - (ret = mbedtls_mpi_read_binary(&(ctx->QP), coeffdata, coefflen) ) != 0 ) - { + if(!ret && ddata) { + /* !checksrc! disable ASSIGNWITHINCONDITION 1 */ + if((ret = mbedtls_mpi_read_binary(&(ctx->D), ddata, dlen) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->P), pdata, plen) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->Q), qdata, qlen) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->DP), e1data, e1len) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->DQ), e2data, e2len) ) != 0 || + (ret = mbedtls_mpi_read_binary(&(ctx->QP), coeffdata, coefflen) ) + != 0) { ret = -1; } ret = mbedtls_rsa_check_privkey(ctx); } - else if (!ret) - { + else if(!ret) { ret = mbedtls_rsa_check_pubkey(ctx); } - if (ret && ctx) { + if(ret && ctx) { _libssh2_mbedtls_rsa_free(ctx); ctx = NULL; } @@ -326,17 +379,17 @@ _libssh2_mbedtls_rsa_new_private(libssh2_rsa_ctx **rsa, { int ret; mbedtls_pk_context pkey; + mbedtls_rsa_context *pk_rsa; *rsa = (libssh2_rsa_ctx *) LIBSSH2_ALLOC(session, sizeof(libssh2_rsa_ctx)); - if (*rsa == NULL) + if(*rsa == NULL) return -1; mbedtls_rsa_init(*rsa, MBEDTLS_RSA_PKCS_V15, 0); mbedtls_pk_init(&pkey); ret = mbedtls_pk_parse_keyfile(&pkey, filename, (char *)passphrase); - if( ret != 0 || mbedtls_pk_get_type(&pkey) != MBEDTLS_PK_RSA) - { + if(ret != 0 || mbedtls_pk_get_type(&pkey) != MBEDTLS_PK_RSA) { mbedtls_pk_free(&pkey); 
mbedtls_rsa_free(*rsa); LIBSSH2_FREE(session, *rsa); @@ -344,7 +397,7 @@ _libssh2_mbedtls_rsa_new_private(libssh2_rsa_ctx **rsa, return -1; } - mbedtls_rsa_context *pk_rsa = mbedtls_pk_rsa(pkey); + pk_rsa = mbedtls_pk_rsa(pkey); mbedtls_rsa_copy(*rsa, pk_rsa); mbedtls_pk_free(&pkey); @@ -360,17 +413,33 @@ _libssh2_mbedtls_rsa_new_private_frommemory(libssh2_rsa_ctx **rsa, { int ret; mbedtls_pk_context pkey; + mbedtls_rsa_context *pk_rsa; + void *filedata_nullterm; + size_t pwd_len; - *rsa = (libssh2_rsa_ctx *) mbedtls_calloc( 1, sizeof( libssh2_rsa_ctx ) ); - if (*rsa == NULL) + *rsa = (libssh2_rsa_ctx *) mbedtls_calloc(1, sizeof(libssh2_rsa_ctx)); + if(*rsa == NULL) return -1; + /* + mbedtls checks in "mbedtls/pkparse.c:1184" if "key[keylen - 1] != '\0'" + private-key from memory will fail if the last byte is not a null byte + */ + filedata_nullterm = mbedtls_calloc(filedata_len + 1, 1); + if(filedata_nullterm == NULL) { + return -1; + } + memcpy(filedata_nullterm, filedata, filedata_len); + mbedtls_pk_init(&pkey); - ret = mbedtls_pk_parse_key(&pkey, (unsigned char *)filedata, - filedata_len, NULL, 0); - if( ret != 0 || mbedtls_pk_get_type(&pkey) != MBEDTLS_PK_RSA) - { + pwd_len = passphrase != NULL ? 
strlen((const char *)passphrase) : 0; + ret = mbedtls_pk_parse_key(&pkey, (unsigned char *)filedata_nullterm, + filedata_len + 1, + passphrase, pwd_len); + _libssh2_mbedtls_safe_free(filedata_nullterm, filedata_len); + + if(ret != 0 || mbedtls_pk_get_type(&pkey) != MBEDTLS_PK_RSA) { mbedtls_pk_free(&pkey); mbedtls_rsa_free(*rsa); LIBSSH2_FREE(session, *rsa); @@ -378,7 +447,7 @@ _libssh2_mbedtls_rsa_new_private_frommemory(libssh2_rsa_ctx **rsa, return -1; } - mbedtls_rsa_context *pk_rsa = mbedtls_pk_rsa(pkey); + pk_rsa = mbedtls_pk_rsa(pkey); mbedtls_rsa_copy(*rsa, pk_rsa); mbedtls_pk_free(&pkey); @@ -400,7 +469,8 @@ _libssh2_mbedtls_rsa_sha1_verify(libssh2_rsa_ctx *rsa, return -1; /* failure */ ret = mbedtls_rsa_pkcs1_verify(rsa, NULL, NULL, MBEDTLS_RSA_PUBLIC, - MBEDTLS_MD_SHA1, SHA_DIGEST_LENGTH, hash, sig); + MBEDTLS_MD_SHA1, SHA_DIGEST_LENGTH, + hash, sig); return (ret == 0) ? 0 : -1; } @@ -421,14 +491,14 @@ _libssh2_mbedtls_rsa_sha1_sign(LIBSSH2_SESSION *session, sig_len = rsa->len; sig = LIBSSH2_ALLOC(session, sig_len); - if (!sig) { + if(!sig) { return -1; } ret = mbedtls_rsa_pkcs1_sign(rsa, NULL, NULL, MBEDTLS_RSA_PRIVATE, MBEDTLS_MD_SHA1, SHA_DIGEST_LENGTH, hash, sig); - if (ret) { + if(ret) { LIBSSH2_FREE(session, sig); return -1; } @@ -453,8 +523,8 @@ gen_publickey_from_rsa(LIBSSH2_SESSION *session, { int e_bytes, n_bytes; unsigned long len; - unsigned char* key; - unsigned char* p; + unsigned char *key; + unsigned char *p; e_bytes = mbedtls_mpi_size(&rsa->E); n_bytes = mbedtls_mpi_size(&rsa->N); @@ -463,7 +533,7 @@ gen_publickey_from_rsa(LIBSSH2_SESSION *session, len = 4 + 7 + 4 + e_bytes + 4 + n_bytes; key = LIBSSH2_ALLOC(session, len); - if (!key) { + if(!key) { return NULL; } @@ -498,36 +568,38 @@ _libssh2_mbedtls_pub_priv_key(LIBSSH2_SESSION *session, unsigned char *key = NULL, *mth = NULL; size_t keylen = 0, mthlen = 0; int ret; + mbedtls_rsa_context *rsa; - if( mbedtls_pk_get_type(pkey) != MBEDTLS_PK_RSA ) - { + if(mbedtls_pk_get_type(pkey) != 
MBEDTLS_PK_RSA) { mbedtls_pk_free(pkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Key type not supported"); } - // write method + /* write method */ mthlen = 7; mth = LIBSSH2_ALLOC(session, mthlen); - if (mth) { + if(mth) { memcpy(mth, "ssh-rsa", mthlen); - } else { + } + else { ret = -1; } - mbedtls_rsa_context *rsa = mbedtls_pk_rsa(*pkey); + rsa = mbedtls_pk_rsa(*pkey); key = gen_publickey_from_rsa(session, rsa, &keylen); - if (key == NULL) { + if(key == NULL) { ret = -1; } - // write output - if (ret) { - if (mth) + /* write output */ + if(ret) { + if(mth) LIBSSH2_FREE(session, mth); - if (key) + if(key) LIBSSH2_FREE(session, key); - } else { + } + else { *method = mth; *method_len = mthlen; *pubkeydata = key; @@ -552,8 +624,7 @@ _libssh2_mbedtls_pub_priv_keyfile(LIBSSH2_SESSION *session, mbedtls_pk_init(&pkey); ret = mbedtls_pk_parse_keyfile(&pkey, privatekey, passphrase); - if( ret != 0 ) - { + if(ret != 0) { mbedtls_strerror(ret, (char *)buf, sizeof(buf)); mbedtls_pk_free(&pkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, buf); @@ -580,12 +651,29 @@ _libssh2_mbedtls_pub_priv_keyfilememory(LIBSSH2_SESSION *session, mbedtls_pk_context pkey; char buf[1024]; int ret; + void *privatekeydata_nullterm; + size_t pwd_len; + + /* + mbedtls checks in "mbedtls/pkparse.c:1184" if "key[keylen - 1] != '\0'" + private-key from memory will fail if the last byte is not a null byte + */ + privatekeydata_nullterm = mbedtls_calloc(privatekeydata_len + 1, 1); + if(privatekeydata_nullterm == NULL) { + return -1; + } + memcpy(privatekeydata_nullterm, privatekeydata, privatekeydata_len); mbedtls_pk_init(&pkey); - ret = mbedtls_pk_parse_key(&pkey, (unsigned char *)privatekeydata, - privatekeydata_len, NULL, 0); - if( ret != 0 ) - { + + pwd_len = passphrase != NULL ? 
strlen((const char *)passphrase) : 0; + ret = mbedtls_pk_parse_key(&pkey, + (unsigned char *)privatekeydata_nullterm, + privatekeydata_len + 1, + (const unsigned char *)passphrase, pwd_len); + _libssh2_mbedtls_safe_free(privatekeydata_nullterm, privatekeydata_len); + + if(ret != 0) { mbedtls_strerror(ret, (char *)buf, sizeof(buf)); mbedtls_pk_free(&pkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, buf); @@ -603,4 +691,580 @@ void _libssh2_init_aes_ctr(void) { /* no implementation */ } + + +/*******************************************************************/ +/* + * mbedTLS backend: Diffie-Hellman functions + */ + +void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx) +{ + *dhctx = _libssh2_mbedtls_bignum_init(); /* Random from client */ +} + +int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order) +{ + /* Generate x and e */ + _libssh2_mbedtls_bignum_random(*dhctx, group_order * 8 - 1, 0, -1); + mbedtls_mpi_exp_mod(public, g, *dhctx, p, NULL); + return 0; +} + +int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p) +{ + /* Compute the shared secret */ + mbedtls_mpi_exp_mod(secret, f, *dhctx, p, NULL); + return 0; +} + +void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx) +{ + _libssh2_mbedtls_bignum_free(*dhctx); + *dhctx = NULL; +} + +#if LIBSSH2_ECDSA + +/*******************************************************************/ +/* + * mbedTLS backend: ECDSA functions + */ + +/* + * _libssh2_ecdsa_create_key + * + * Creates a local private key based on input curve + * and returns octal value and octal length + * + */ + +int +_libssh2_mbedtls_ecdsa_create_key(LIBSSH2_SESSION *session, + _libssh2_ec_key **privkey, + unsigned char **pubkey_oct, + size_t *pubkey_oct_len, + libssh2_curve_type curve) +{ + size_t plen = 0; + + *privkey = LIBSSH2_ALLOC(session, sizeof(mbedtls_ecp_keypair)); + + if(*privkey == NULL) + goto failed; + + mbedtls_ecdsa_init(*privkey); + + 
if(mbedtls_ecdsa_genkey(*privkey, (mbedtls_ecp_group_id)curve, + mbedtls_ctr_drbg_random, + &_libssh2_mbedtls_ctr_drbg) != 0) + goto failed; + + plen = 2 * mbedtls_mpi_size(&(*privkey)->grp.P) + 1; + *pubkey_oct = LIBSSH2_ALLOC(session, plen); + + if(*pubkey_oct == NULL) + goto failed; + + if(mbedtls_ecp_point_write_binary(&(*privkey)->grp, &(*privkey)->Q, + MBEDTLS_ECP_PF_UNCOMPRESSED, + pubkey_oct_len, *pubkey_oct, plen) == 0) + return 0; + +failed: + + _libssh2_mbedtls_ecdsa_free(*privkey); + _libssh2_mbedtls_safe_free(*pubkey_oct, plen); + *privkey = NULL; + + return -1; +} + +/* _libssh2_ecdsa_curve_name_with_octal_new + * + * Creates a new public key given an octal string, length and type + * + */ + +int +_libssh2_mbedtls_ecdsa_curve_name_with_octal_new(libssh2_ecdsa_ctx **ctx, + const unsigned char *k, + size_t k_len, + libssh2_curve_type curve) +{ + *ctx = mbedtls_calloc(1, sizeof(mbedtls_ecp_keypair)); + + if(*ctx == NULL) + goto failed; + + mbedtls_ecdsa_init(*ctx); + + if(mbedtls_ecp_group_load(&(*ctx)->grp, (mbedtls_ecp_group_id)curve) != 0) + goto failed; + + if(mbedtls_ecp_point_read_binary(&(*ctx)->grp, &(*ctx)->Q, k, k_len) != 0) + goto failed; + + if(mbedtls_ecp_check_pubkey(&(*ctx)->grp, &(*ctx)->Q) == 0) + return 0; + +failed: + + _libssh2_mbedtls_ecdsa_free(*ctx); + *ctx = NULL; + + return -1; +} + +/* _libssh2_ecdh_gen_k + * + * Computes the shared secret K given a local private key, + * remote public key and length + */ + +int +_libssh2_mbedtls_ecdh_gen_k(_libssh2_bn **k, + _libssh2_ec_key *privkey, + const unsigned char *server_pubkey, + size_t server_pubkey_len) +{ + mbedtls_ecp_point pubkey; + int rc = 0; + + if(*k == NULL) + return -1; + + mbedtls_ecp_point_init(&pubkey); + + if(mbedtls_ecp_point_read_binary(&privkey->grp, &pubkey, + server_pubkey, server_pubkey_len) != 0) { + rc = -1; + goto cleanup; + } + + if(mbedtls_ecdh_compute_shared(&privkey->grp, *k, + &pubkey, &privkey->d, + mbedtls_ctr_drbg_random, + &_libssh2_mbedtls_ctr_drbg) 
!= 0) { + rc = -1; + goto cleanup; + } + + if(mbedtls_ecp_check_privkey(&privkey->grp, *k) != 0) + rc = -1; + +cleanup: + + mbedtls_ecp_point_free(&pubkey); + + return rc; +} + +#define LIBSSH2_MBEDTLS_ECDSA_VERIFY(digest_type) \ +{ \ + unsigned char hsh[SHA##digest_type##_DIGEST_LENGTH]; \ + \ + if(libssh2_sha##digest_type(m, m_len, hsh) == 0) { \ + rc = mbedtls_ecdsa_verify(&ctx->grp, hsh, \ + SHA##digest_type##_DIGEST_LENGTH, \ + &ctx->Q, &pr, &ps); \ + } \ + \ +} + +/* _libssh2_ecdsa_sign + * + * Verifies the ECDSA signature of a hashed message + * + */ + +int +_libssh2_mbedtls_ecdsa_verify(libssh2_ecdsa_ctx *ctx, + const unsigned char *r, size_t r_len, + const unsigned char *s, size_t s_len, + const unsigned char *m, size_t m_len) +{ + mbedtls_mpi pr, ps; + int rc = -1; + + mbedtls_mpi_init(&pr); + mbedtls_mpi_init(&ps); + + if(mbedtls_mpi_read_binary(&pr, r, r_len) != 0) + goto cleanup; + + if(mbedtls_mpi_read_binary(&ps, s, s_len) != 0) + goto cleanup; + + switch(_libssh2_ecdsa_get_curve_type(ctx)) { + case LIBSSH2_EC_CURVE_NISTP256: + LIBSSH2_MBEDTLS_ECDSA_VERIFY(256); + break; + case LIBSSH2_EC_CURVE_NISTP384: + LIBSSH2_MBEDTLS_ECDSA_VERIFY(384); + break; + case LIBSSH2_EC_CURVE_NISTP521: + LIBSSH2_MBEDTLS_ECDSA_VERIFY(512); + break; + default: + rc = -1; + } + +cleanup: + + mbedtls_mpi_free(&pr); + mbedtls_mpi_free(&ps); + + return (rc == 0) ? 0 : -1; +} + +static int +_libssh2_mbedtls_parse_eckey(libssh2_ecdsa_ctx **ctx, + mbedtls_pk_context *pkey, + LIBSSH2_SESSION *session, + const unsigned char *data, + size_t data_len, + const unsigned char *pwd) +{ + size_t pwd_len; + + pwd_len = pwd ? 
strlen((const char *) pwd) : 0; + + if(mbedtls_pk_parse_key(pkey, data, data_len, pwd, pwd_len) != 0) + goto failed; + + if(mbedtls_pk_get_type(pkey) != MBEDTLS_PK_ECKEY) + goto failed; + + *ctx = LIBSSH2_ALLOC(session, sizeof(libssh2_ecdsa_ctx)); + + if(*ctx == NULL) + goto failed; + + mbedtls_ecdsa_init(*ctx); + + if(mbedtls_ecdsa_from_keypair(*ctx, mbedtls_pk_ec(*pkey)) == 0) + return 0; + +failed: + + _libssh2_mbedtls_ecdsa_free(*ctx); + *ctx = NULL; + + return -1; +} + +static int +_libssh2_mbedtls_parse_openssh_key(libssh2_ecdsa_ctx **ctx, + LIBSSH2_SESSION *session, + const unsigned char *data, + size_t data_len, + const unsigned char *pwd) +{ + libssh2_curve_type type; + unsigned char *name = NULL; + struct string_buf *decrypted = NULL; + size_t curvelen, exponentlen, pointlen; + unsigned char *curve, *exponent, *point_buf; + + if(_libssh2_openssh_pem_parse_memory(session, pwd, + (const char *)data, data_len, + &decrypted) != 0) + goto failed; + + if(_libssh2_get_string(decrypted, &name, NULL) != 0) + goto failed; + + if(_libssh2_mbedtls_ecdsa_curve_type_from_name((const char *)name, + &type) != 0) + goto failed; + + if(_libssh2_get_string(decrypted, &curve, &curvelen) != 0) + goto failed; + + if(_libssh2_get_string(decrypted, &point_buf, &pointlen) != 0) + goto failed; + + if(_libssh2_get_bignum_bytes(decrypted, &exponent, &exponentlen) != 0) + goto failed; + + *ctx = LIBSSH2_ALLOC(session, sizeof(libssh2_ecdsa_ctx)); + + if(*ctx == NULL) + goto failed; + + mbedtls_ecdsa_init(*ctx); + + if(mbedtls_ecp_group_load(&(*ctx)->grp, (mbedtls_ecp_group_id)type) != 0) + goto failed; + + if(mbedtls_mpi_read_binary(&(*ctx)->d, exponent, exponentlen) != 0) + goto failed; + + if(mbedtls_ecp_mul(&(*ctx)->grp, &(*ctx)->Q, + &(*ctx)->d, &(*ctx)->grp.G, + mbedtls_ctr_drbg_random, + &_libssh2_mbedtls_ctr_drbg) != 0) + goto failed; + + if(mbedtls_ecp_check_privkey(&(*ctx)->grp, &(*ctx)->d) == 0) + goto cleanup; + +failed: + + _libssh2_mbedtls_ecdsa_free(*ctx); + *ctx = NULL; 
+ +cleanup: + + if(decrypted) { + _libssh2_string_buf_free(session, decrypted); + } + + return (*ctx == NULL) ? -1 : 0; +} + +/* _libssh2_ecdsa_new_private + * + * Creates a new private key given a file path and password + * + */ + +int +_libssh2_mbedtls_ecdsa_new_private(libssh2_ecdsa_ctx **ctx, + LIBSSH2_SESSION *session, + const char *filename, + const unsigned char *pwd) +{ + mbedtls_pk_context pkey; + unsigned char *data; + size_t data_len; + + if(mbedtls_pk_load_file(filename, &data, &data_len) != 0) + goto cleanup; + + mbedtls_pk_init(&pkey); + + if(_libssh2_mbedtls_parse_eckey(ctx, &pkey, session, + data, data_len, pwd) == 0) + goto cleanup; + + _libssh2_mbedtls_parse_openssh_key(ctx, session, data, data_len, pwd); + +cleanup: + + mbedtls_pk_free(&pkey); + + _libssh2_mbedtls_safe_free(data, data_len); + + return (*ctx == NULL) ? -1 : 0; +} + +/* _libssh2_ecdsa_new_private + * + * Creates a new private key given a file data and password + * + */ + +int +_libssh2_mbedtls_ecdsa_new_private_frommemory(libssh2_ecdsa_ctx **ctx, + LIBSSH2_SESSION *session, + const char *data, + size_t data_len, + const unsigned char *pwd) +{ + unsigned char *ntdata; + mbedtls_pk_context pkey; + + mbedtls_pk_init(&pkey); + + ntdata = LIBSSH2_ALLOC(session, data_len + 1); + + if(ntdata == NULL) + goto cleanup; + + memcpy(ntdata, data, data_len); + + if(_libssh2_mbedtls_parse_eckey(ctx, &pkey, session, + ntdata, data_len + 1, pwd) == 0) + goto cleanup; + + _libssh2_mbedtls_parse_openssh_key(ctx, session, + ntdata, data_len + 1, pwd); + +cleanup: + + mbedtls_pk_free(&pkey); + + _libssh2_mbedtls_safe_free(ntdata, data_len); + + return (*ctx == NULL) ? 
-1 : 0; +} + +static unsigned char * +_libssh2_mbedtls_mpi_write_binary(unsigned char *buf, + const mbedtls_mpi *mpi, + size_t bytes) +{ + unsigned char *p = buf; + + if(sizeof(&p) / sizeof(p[0]) < 4) { + goto done; + } + + p += 4; + *p = 0; + + if(bytes > 0) { + mbedtls_mpi_write_binary(mpi, p + 1, bytes - 1); + } + + if(bytes > 0 && !(*(p + 1) & 0x80)) { + memmove(p, p + 1, --bytes); + } + + _libssh2_htonu32(p - 4, bytes); + +done: + + return p + bytes; +} + +/* _libssh2_ecdsa_sign + * + * Computes the ECDSA signature of a previously-hashed message + * + */ + +int +_libssh2_mbedtls_ecdsa_sign(LIBSSH2_SESSION *session, + libssh2_ecdsa_ctx *ctx, + const unsigned char *hash, + unsigned long hash_len, + unsigned char **sign, + size_t *sign_len) +{ + size_t r_len, s_len, tmp_sign_len = 0; + unsigned char *sp, *tmp_sign = NULL; + mbedtls_mpi pr, ps; + + mbedtls_mpi_init(&pr); + mbedtls_mpi_init(&ps); + + if(mbedtls_ecdsa_sign(&ctx->grp, &pr, &ps, &ctx->d, + hash, hash_len, + mbedtls_ctr_drbg_random, + &_libssh2_mbedtls_ctr_drbg) != 0) + goto cleanup; + + r_len = mbedtls_mpi_size(&pr) + 1; + s_len = mbedtls_mpi_size(&ps) + 1; + tmp_sign_len = r_len + s_len + 8; + + tmp_sign = LIBSSH2_CALLOC(session, tmp_sign_len); + + if(tmp_sign == NULL) + goto cleanup; + + sp = tmp_sign; + sp = _libssh2_mbedtls_mpi_write_binary(sp, &pr, r_len); + sp = _libssh2_mbedtls_mpi_write_binary(sp, &ps, s_len); + + *sign_len = (size_t)(sp - tmp_sign); + + *sign = LIBSSH2_CALLOC(session, *sign_len); + + if(*sign == NULL) + goto cleanup; + + memcpy(*sign, tmp_sign, *sign_len); + +cleanup: + + mbedtls_mpi_free(&pr); + mbedtls_mpi_free(&ps); + + _libssh2_mbedtls_safe_free(tmp_sign, tmp_sign_len); + + return (*sign == NULL) ? 
-1 : 0; +} + +/* _libssh2_ecdsa_get_curve_type + * + * returns key curve type that maps to libssh2_curve_type + * + */ + +libssh2_curve_type +_libssh2_mbedtls_ecdsa_get_curve_type(libssh2_ecdsa_ctx *ctx) +{ + return (libssh2_curve_type) ctx->grp.id; +} + +/* _libssh2_ecdsa_curve_type_from_name + * + * returns 0 for success, key curve type that maps to libssh2_curve_type + * + */ + +int +_libssh2_mbedtls_ecdsa_curve_type_from_name(const char *name, + libssh2_curve_type *out_type) +{ + int ret = 0; + libssh2_curve_type type; + + if(name == NULL || strlen(name) != 19) + return -1; + + if(strcmp(name, "ecdsa-sha2-nistp256") == 0) + type = LIBSSH2_EC_CURVE_NISTP256; + else if(strcmp(name, "ecdsa-sha2-nistp384") == 0) + type = LIBSSH2_EC_CURVE_NISTP384; + else if(strcmp(name, "ecdsa-sha2-nistp521") == 0) + type = LIBSSH2_EC_CURVE_NISTP521; + else { + ret = -1; + } + + if(ret == 0 && out_type) { + *out_type = type; + } + + return ret; +} + +void +_libssh2_mbedtls_ecdsa_free(libssh2_ecdsa_ctx *ctx) +{ + mbedtls_ecdsa_free(ctx); + mbedtls_free(ctx); +} + + +/* _libssh2_supported_key_sign_algorithms + * + * Return supported key hash algo upgrades, see crypto.h + * + */ + +const char * +_libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session, + unsigned char *key_method, + size_t key_method_len) +{ + (void)session; + (void)key_method; + (void)key_method_len; + + return NULL; +} + +#endif /* LIBSSH2_ECDSA */ #endif /* LIBSSH2_MBEDTLS */ diff --git a/vendor/libssh2/src/mbedtls.h b/vendor/libssh2/src/mbedtls.h index 248583ed3c..0450113f06 100644 --- a/vendor/libssh2/src/mbedtls.h +++ b/vendor/libssh2/src/mbedtls.h @@ -1,3 +1,42 @@ +#ifndef __LIBSSH2_MBEDTLS_H +#define __LIBSSH2_MBEDTLS_H +/* Copyright (c) 2016, Art + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * Neither the name of the copyright holder nor the names + * of any other contributors may be used to endorse or + * promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. 
+ */ + #include #include @@ -6,6 +45,12 @@ #include #include #include +#ifdef MBEDTLS_ECDH_C +# include +#endif +#ifdef MBEDTLS_ECDSA_C +# include +#endif #include #include #include @@ -26,20 +71,23 @@ #define LIBSSH2_3DES 1 #define LIBSSH2_RSA 1 +#define LIBSSH2_RSA_SHA2 0 #define LIBSSH2_DSA 0 +#ifdef MBEDTLS_ECDSA_C +# define LIBSSH2_ECDSA 1 +#else +# define LIBSSH2_ECDSA 0 +#endif +#define LIBSSH2_ED25519 0 #define MD5_DIGEST_LENGTH 16 #define SHA_DIGEST_LENGTH 20 #define SHA256_DIGEST_LENGTH 32 +#define SHA384_DIGEST_LENGTH 48 #define SHA512_DIGEST_LENGTH 64 -/*******************************************************************/ -/* - * mbedTLS backend: Global context handles - */ +#define EC_MAX_POINT_LEN ((528 * 2 / 8) + 1) -mbedtls_entropy_context _libssh2_mbedtls_entropy; -mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; /*******************************************************************/ /* @@ -80,6 +128,8 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; _libssh2_mbedtls_hash_init(pctx, MBEDTLS_MD_RIPEMD160, key, keylen) #define libssh2_hmac_sha256_init(pctx, key, keylen) \ _libssh2_mbedtls_hash_init(pctx, MBEDTLS_MD_SHA256, key, keylen) +#define libssh2_hmac_sha384_init(pctx, key, keylen) \ + _libssh2_mbedtls_hash_init(pctx, MBEDTLS_MD_SHA384, key, keylen) #define libssh2_hmac_sha512_init(pctx, key, keylen) \ _libssh2_mbedtls_hash_init(pctx, MBEDTLS_MD_SHA512, key, keylen) @@ -117,6 +167,23 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; _libssh2_mbedtls_hash(data, datalen, MBEDTLS_MD_SHA256, hash) +/*******************************************************************/ +/* + * mbedTLS backend: SHA384 functions + */ + +#define libssh2_sha384_ctx mbedtls_md_context_t + +#define libssh2_sha384_init(pctx) \ + _libssh2_mbedtls_hash_init(pctx, MBEDTLS_MD_SHA384, NULL, 0) +#define libssh2_sha384_update(ctx, data, datalen) \ + mbedtls_md_update(&ctx, (unsigned char *) data, datalen) +#define libssh2_sha384_final(ctx, hash) \ + 
_libssh2_mbedtls_hash_final(&ctx, hash) +#define libssh2_sha384(data, datalen, hash) \ + _libssh2_mbedtls_hash(data, datalen, MBEDTLS_MD_SHA384, hash) + + /*******************************************************************/ /* * mbedTLS backend: SHA512 functions @@ -150,9 +217,10 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; #define libssh2_md5(data, datalen, hash) \ _libssh2_mbedtls_hash(data, datalen, MBEDTLS_MD_MD5, hash) + /*******************************************************************/ /* - * mbedTLS backend: RSA structure + * mbedTLS backend: RSA functions */ #define libssh2_rsa_ctx mbedtls_rsa_context @@ -181,6 +249,82 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; #define _libssh2_rsa_free(rsactx) \ _libssh2_mbedtls_rsa_free(rsactx) + +/*******************************************************************/ +/* + * mbedTLS backend: ECDSA structures + */ + +#if LIBSSH2_ECDSA + +typedef enum { +#ifdef MBEDTLS_ECP_DP_SECP256R1_ENABLED + LIBSSH2_EC_CURVE_NISTP256 = MBEDTLS_ECP_DP_SECP256R1, +#else + LIBSSH2_EC_CURVE_NISTP256 = MBEDTLS_ECP_DP_NONE, +#endif +#ifdef MBEDTLS_ECP_DP_SECP384R1_ENABLED + LIBSSH2_EC_CURVE_NISTP384 = MBEDTLS_ECP_DP_SECP384R1, +#else + LIBSSH2_EC_CURVE_NISTP384 = MBEDTLS_ECP_DP_NONE, +#endif +#ifdef MBEDTLS_ECP_DP_SECP521R1_ENABLED + LIBSSH2_EC_CURVE_NISTP521 = MBEDTLS_ECP_DP_SECP521R1 +#else + LIBSSH2_EC_CURVE_NISTP521 = MBEDTLS_ECP_DP_NONE, +#endif +} libssh2_curve_type; + +# define _libssh2_ec_key mbedtls_ecp_keypair +#else +# define _libssh2_ec_key void +#endif /* LIBSSH2_ECDSA */ + + +/*******************************************************************/ +/* + * mbedTLS backend: ECDSA functions + */ + +#if LIBSSH2_ECDSA + +#define libssh2_ecdsa_ctx mbedtls_ecdsa_context + +#define _libssh2_ecdsa_create_key(session, privkey, pubkey_octal, \ + pubkey_octal_len, curve) \ + _libssh2_mbedtls_ecdsa_create_key(session, privkey, pubkey_octal, \ + pubkey_octal_len, curve) + +#define 
_libssh2_ecdsa_curve_name_with_octal_new(ctx, k, k_len, curve) \ + _libssh2_mbedtls_ecdsa_curve_name_with_octal_new(ctx, k, k_len, curve) + +#define _libssh2_ecdh_gen_k(k, privkey, server_pubkey, server_pubkey_len) \ + _libssh2_mbedtls_ecdh_gen_k(k, privkey, server_pubkey, server_pubkey_len) + +#define _libssh2_ecdsa_verify(ctx, r, r_len, s, s_len, m, m_len) \ + _libssh2_mbedtls_ecdsa_verify(ctx, r, r_len, s, s_len, m, m_len) + +#define _libssh2_ecdsa_new_private(ctx, session, filename, passphrase) \ + _libssh2_mbedtls_ecdsa_new_private(ctx, session, filename, passphrase) + +#define _libssh2_ecdsa_new_private_frommemory(ctx, session, filedata, \ + filedata_len, passphrase) \ + _libssh2_mbedtls_ecdsa_new_private_frommemory(ctx, session, filedata, \ + filedata_len, passphrase) + +#define _libssh2_ecdsa_sign(session, ctx, hash, hash_len, sign, sign_len) \ + _libssh2_mbedtls_ecdsa_sign(session, ctx, hash, hash_len, sign, sign_len) + +#define _libssh2_ecdsa_get_curve_type(ctx) \ + _libssh2_mbedtls_ecdsa_get_curve_type(ctx) + +#define _libssh2_ecdsa_free(ctx) \ + _libssh2_mbedtls_ecdsa_free(ctx) + +#endif /* LIBSSH2_ECDSA */ + + +/*******************************************************************/ /* * mbedTLS backend: Key functions */ @@ -193,10 +337,11 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; pk, pk_len, pw) - /*******************************************************************/ +/*******************************************************************/ /* * mbedTLS backend: Cipher Context structure */ + #define _libssh2_cipher_ctx mbedtls_cipher_context_t #define _libssh2_cipher_type(algo) mbedtls_cipher_type_t algo @@ -212,6 +357,8 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; #define _libssh2_cipher_cast5 MBEDTLS_CIPHER_NULL #define _libssh2_cipher_3des MBEDTLS_CIPHER_DES_EDE3_CBC + +/*******************************************************************/ /* * mbedTLS backend: Cipher functions */ @@ -239,10 +386,6 @@ mbedtls_ctr_drbg_context 
_libssh2_mbedtls_ctr_drbg; _libssh2_mbedtls_bignum_init() #define _libssh2_bn_init_from_bin() \ _libssh2_mbedtls_bignum_init() -#define _libssh2_bn_rand(bn, bits, top, bottom) \ - _libssh2_mbedtls_bignum_random(bn, bits, top, bottom) -#define _libssh2_bn_mod_exp(r, a, p, m, ctx) \ - mbedtls_mpi_exp_mod(r, a, p, m, NULL) #define _libssh2_bn_set_word(bn, word) \ mbedtls_mpi_lset(bn, word) #define _libssh2_bn_from_bin(bn, len, bin) \ @@ -254,13 +397,28 @@ mbedtls_ctr_drbg_context _libssh2_mbedtls_ctr_drbg; #define _libssh2_bn_bits(bn) \ mbedtls_mpi_bitlen(bn) #define _libssh2_bn_free(bn) \ - mbedtls_mpi_free(bn) + _libssh2_mbedtls_bignum_free(bn) + + +/*******************************************************************/ +/* + * mbedTLS backend: Diffie-Hellman support. + */ + +#define _libssh2_dh_ctx mbedtls_mpi * +#define libssh2_dh_init(dhctx) _libssh2_dh_init(dhctx) +#define libssh2_dh_key_pair(dhctx, public, g, p, group_order, bnctx) \ + _libssh2_dh_key_pair(dhctx, public, g, p, group_order) +#define libssh2_dh_secret(dhctx, secret, f, p, bnctx) \ + _libssh2_dh_secret(dhctx, secret, f, p) +#define libssh2_dh_dtor(dhctx) _libssh2_dh_dtor(dhctx) /*******************************************************************/ /* * mbedTLS backend: forward declarations */ + void _libssh2_mbedtls_init(void); @@ -302,9 +460,6 @@ _libssh2_mbedtls_bignum_init(void); void _libssh2_mbedtls_bignum_free(_libssh2_bn *bn); -int -_libssh2_mbedtls_bignum_random(_libssh2_bn *bn, int bits, int top, int bottom); - int _libssh2_mbedtls_rsa_new(libssh2_rsa_ctx **rsa, const unsigned char *edata, @@ -369,3 +524,64 @@ _libssh2_mbedtls_pub_priv_keyfilememory(LIBSSH2_SESSION *session, const char *privatekeydata, size_t privatekeydata_len, const char *passphrase); +#if LIBSSH2_ECDSA +int +_libssh2_mbedtls_ecdsa_create_key(LIBSSH2_SESSION *session, + _libssh2_ec_key **privkey, + unsigned char **pubkey_octal, + size_t *pubkey_octal_len, + libssh2_curve_type curve); +int 
+_libssh2_mbedtls_ecdsa_curve_name_with_octal_new(libssh2_ecdsa_ctx **ctx, + const unsigned char *k, + size_t k_len, + libssh2_curve_type curve); +int +_libssh2_mbedtls_ecdh_gen_k(_libssh2_bn **k, + _libssh2_ec_key *privkey, + const unsigned char *server_pubkey, + size_t server_pubkey_len); +int +_libssh2_mbedtls_ecdsa_verify(libssh2_ecdsa_ctx *ctx, + const unsigned char *r, size_t r_len, + const unsigned char *s, size_t s_len, + const unsigned char *m, size_t m_len); +int +_libssh2_mbedtls_ecdsa_new_private(libssh2_ecdsa_ctx **ctx, + LIBSSH2_SESSION *session, + const char *filename, + const unsigned char *passphrase); +int +_libssh2_mbedtls_ecdsa_new_private_frommemory(libssh2_ecdsa_ctx **ctx, + LIBSSH2_SESSION *session, + const char *filedata, + size_t filedata_len, + const unsigned char *passphrase); +int +_libssh2_mbedtls_ecdsa_sign(LIBSSH2_SESSION *session, + libssh2_ecdsa_ctx *ctx, + const unsigned char *hash, + unsigned long hash_len, + unsigned char **signature, + size_t *signature_len); +libssh2_curve_type +_libssh2_mbedtls_ecdsa_key_get_curve_type(libssh2_ecdsa_ctx *ctx); +int +_libssh2_mbedtls_ecdsa_curve_type_from_name(const char *name, + libssh2_curve_type *type); +void +_libssh2_mbedtls_ecdsa_free(libssh2_ecdsa_ctx *ctx); +#endif /* LIBSSH2_ECDSA */ + +extern void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx); +extern int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order); +extern int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p); +extern void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx); + +#endif /* __LIBSSH2_MBEDTLS_H */ diff --git a/vendor/libssh2/src/misc.c b/vendor/libssh2/src/misc.c index f7faae7b6a..594b2d1f77 100644 --- a/vendor/libssh2/src/misc.c +++ b/vendor/libssh2/src/misc.c @@ -1,5 +1,5 @@ /* Copyright (c) 2004-2007 Sara Golemon - * Copyright (c) 2009-2014 by Daniel Stenberg + * Copyright (c) 2009-2019 by Daniel Stenberg * 
Copyright (c) 2010 Simon Josefsson * All rights reserved. * @@ -39,6 +39,11 @@ #include "libssh2_priv.h" #include "misc.h" +#include "blf.h" + +#ifdef HAVE_STDLIB_H +#include +#endif #ifdef HAVE_UNISTD_H #include @@ -48,21 +53,28 @@ #include #endif +#if defined(HAVE_DECL_SECUREZEROMEMORY) && HAVE_DECL_SECUREZEROMEMORY +#ifdef HAVE_WINDOWS_H +#include +#endif +#endif + #include #include -int _libssh2_error_flags(LIBSSH2_SESSION* session, int errcode, const char* errmsg, int errflags) +int _libssh2_error_flags(LIBSSH2_SESSION* session, int errcode, + const char *errmsg, int errflags) { - if (session->err_flags & LIBSSH2_ERR_FLAG_DUP) + if(session->err_flags & LIBSSH2_ERR_FLAG_DUP) LIBSSH2_FREE(session, (char *)session->err_msg); session->err_code = errcode; session->err_flags = 0; - if ((errmsg != NULL) && ((errflags & LIBSSH2_ERR_FLAG_DUP) != 0)) { + if((errmsg != NULL) && ((errflags & LIBSSH2_ERR_FLAG_DUP) != 0)) { size_t len = strlen(errmsg); char *copy = LIBSSH2_ALLOC(session, len + 1); - if (copy) { + if(copy) { memcpy(copy, errmsg, len + 1); session->err_flags = LIBSSH2_ERR_FLAG_DUP; session->err_msg = copy; @@ -86,7 +98,7 @@ int _libssh2_error_flags(LIBSSH2_SESSION* session, int errcode, const char* errm return errcode; } -int _libssh2_error(LIBSSH2_SESSION* session, int errcode, const char* errmsg) +int _libssh2_error(LIBSSH2_SESSION* session, int errcode, const char *errmsg) { return _libssh2_error_flags(session, errcode, errmsg, 0); } @@ -94,7 +106,7 @@ int _libssh2_error(LIBSSH2_SESSION* session, int errcode, const char* errmsg) #ifdef WIN32 static int wsa2errno(void) { - switch (WSAGetLastError()) { + switch(WSAGetLastError()) { case WSAEWOULDBLOCK: return EAGAIN; @@ -127,21 +139,18 @@ _libssh2_recv(libssh2_socket_t sock, void *buffer, size_t length, rc = recv(sock, buffer, length, flags); #ifdef WIN32 - if (rc < 0 ) + if(rc < 0) return -wsa2errno(); -#elif defined(__VMS) - if (rc < 0 ){ - if ( errno == EWOULDBLOCK ) - return -EAGAIN; - else - return 
-errno; - } #else - if (rc < 0 ){ + if(rc < 0) { /* Sometimes the first recv() function call sets errno to ENOENT on Solaris and HP-UX */ - if ( errno == ENOENT ) + if(errno == ENOENT) return -EAGAIN; +#ifdef EWOULDBLOCK /* For VMS and other special unixes */ + else if(errno == EWOULDBLOCK) + return -EAGAIN; +#endif else return -errno; } @@ -163,18 +172,16 @@ _libssh2_send(libssh2_socket_t sock, const void *buffer, size_t length, rc = send(sock, buffer, length, flags); #ifdef WIN32 - if (rc < 0 ) + if(rc < 0) return -wsa2errno(); -#elif defined(__VMS) - if (rc < 0 ) { - if ( errno == EWOULDBLOCK ) - return -EAGAIN; - else - return -errno; - } #else - if (rc < 0 ) - return -errno; + if(rc < 0) { +#ifdef EWOULDBLOCK /* For VMS and other special unixes */ + if(errno == EWOULDBLOCK) + return -EAGAIN; +#endif + return -errno; + } #endif return rc; } @@ -184,7 +191,10 @@ _libssh2_send(libssh2_socket_t sock, const void *buffer, size_t length, unsigned int _libssh2_ntohu32(const unsigned char *buf) { - return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + return (((unsigned int)buf[0] << 24) + | ((unsigned int)buf[1] << 16) + | ((unsigned int)buf[2] << 8) + | ((unsigned int)buf[3])); } @@ -269,15 +279,16 @@ libssh2_base64_decode(LIBSSH2_SESSION *session, char **data, *data = LIBSSH2_ALLOC(session, (3 * src_len / 4) + 1); d = (unsigned char *) *data; - if (!d) { + if(!d) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for base64 decoding"); } for(s = (unsigned char *) src; ((char *) s) < (src + src_len); s++) { - if ((v = base64_reverse_table[*s]) < 0) + v = base64_reverse_table[*s]; + if(v < 0) continue; - switch (i % 4) { + switch(i % 4) { case 0: d[len] = (unsigned char)(v << 2); break; @@ -295,10 +306,11 @@ libssh2_base64_decode(LIBSSH2_SESSION *session, char **data, } i++; } - if ((i % 4) == 1) { + if((i % 4) == 1) { /* Invalid -- We have a byte which belongs exclusively to a partial octet */ LIBSSH2_FREE(session, *data); + 
*data = NULL; return _libssh2_error(session, LIBSSH2_ERROR_INVAL, "Invalid base64"); } @@ -321,68 +333,69 @@ static const char table64[]= size_t _libssh2_base64_encode(LIBSSH2_SESSION *session, const char *inp, size_t insize, char **outptr) { - unsigned char ibuf[3]; - unsigned char obuf[4]; - int i; - int inputparts; - char *output; - char *base64data; - const char *indata = inp; - - *outptr = NULL; /* set to NULL in case of failure before we reach the end */ - - if(0 == insize) - insize = strlen(indata); - - base64data = output = LIBSSH2_ALLOC(session, insize*4/3+4); - if(NULL == output) - return 0; - - while(insize > 0) { - for (i = inputparts = 0; i < 3; i++) { - if(insize > 0) { - inputparts++; - ibuf[i] = *indata; - indata++; - insize--; - } - else - ibuf[i] = 0; - } + unsigned char ibuf[3]; + unsigned char obuf[4]; + int i; + int inputparts; + char *output; + char *base64data; + const char *indata = inp; + + *outptr = NULL; /* set to NULL in case of failure before we reach the + end */ + + if(0 == insize) + insize = strlen(indata); + + base64data = output = LIBSSH2_ALLOC(session, insize * 4 / 3 + 4); + if(NULL == output) + return 0; + + while(insize > 0) { + for(i = inputparts = 0; i < 3; i++) { + if(insize > 0) { + inputparts++; + ibuf[i] = *indata; + indata++; + insize--; + } + else + ibuf[i] = 0; + } - obuf[0] = (unsigned char) ((ibuf[0] & 0xFC) >> 2); - obuf[1] = (unsigned char) (((ibuf[0] & 0x03) << 4) | \ - ((ibuf[1] & 0xF0) >> 4)); - obuf[2] = (unsigned char) (((ibuf[1] & 0x0F) << 2) | \ - ((ibuf[2] & 0xC0) >> 6)); - obuf[3] = (unsigned char) (ibuf[2] & 0x3F); - - switch(inputparts) { - case 1: /* only one byte read */ - snprintf(output, 5, "%c%c==", - table64[obuf[0]], - table64[obuf[1]]); - break; - case 2: /* two bytes read */ - snprintf(output, 5, "%c%c%c=", - table64[obuf[0]], - table64[obuf[1]], - table64[obuf[2]]); - break; - default: - snprintf(output, 5, "%c%c%c%c", - table64[obuf[0]], - table64[obuf[1]], - table64[obuf[2]], - 
table64[obuf[3]] ); - break; + obuf[0] = (unsigned char) ((ibuf[0] & 0xFC) >> 2); + obuf[1] = (unsigned char) (((ibuf[0] & 0x03) << 4) | \ + ((ibuf[1] & 0xF0) >> 4)); + obuf[2] = (unsigned char) (((ibuf[1] & 0x0F) << 2) | \ + ((ibuf[2] & 0xC0) >> 6)); + obuf[3] = (unsigned char) (ibuf[2] & 0x3F); + + switch(inputparts) { + case 1: /* only one byte read */ + snprintf(output, 5, "%c%c==", + table64[obuf[0]], + table64[obuf[1]]); + break; + case 2: /* two bytes read */ + snprintf(output, 5, "%c%c%c=", + table64[obuf[0]], + table64[obuf[1]], + table64[obuf[2]]); + break; + default: + snprintf(output, 5, "%c%c%c%c", + table64[obuf[0]], + table64[obuf[1]], + table64[obuf[2]], + table64[obuf[3]]); + break; + } + output += 4; } - output += 4; - } - *output=0; - *outptr = base64data; /* make it return the actual data memory */ + *output = 0; + *outptr = base64data; /* make it return the actual data memory */ - return strlen(base64data); /* return the length of the new data */ + return strlen(base64data); /* return the length of the new data */ } /* ---- End of Base64 Encoding ---- */ @@ -403,7 +416,7 @@ libssh2_trace(LIBSSH2_SESSION * session, int bitmask) } LIBSSH2_API int -libssh2_trace_sethandler(LIBSSH2_SESSION *session, void* handler_context, +libssh2_trace_sethandler(LIBSSH2_SESSION *session, void *handler_context, libssh2_trace_handler_func callback) { session->tracehandler = callback; @@ -431,18 +444,18 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) 
"Publickey", "Socket", }; - const char* contexttext = contexts[0]; + const char *contexttext = contexts[0]; unsigned int contextindex; - if (!(session->showmask & context)) { + if(!(session->showmask & context)) { /* no such output asked for */ return; } /* Find the first matching context string for this message */ - for (contextindex = 0; contextindex < ARRAY_SIZE(contexts); + for(contextindex = 0; contextindex < ARRAY_SIZE(contexts); contextindex++) { - if ((context & (1 << contextindex)) != 0) { + if((context & (1 << contextindex)) != 0) { contexttext = contexts[contextindex]; break; } @@ -457,7 +470,7 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) len = snprintf(buffer, buflen, "[libssh2] %d.%06d %s: ", (int)now.tv_sec, (int)now.tv_usec, contexttext); - if (len >= buflen) + if(len >= buflen) msglen = buflen - 1; else { buflen -= len; @@ -468,7 +481,7 @@ _libssh2_debug(LIBSSH2_SESSION * session, int context, const char *format, ...) msglen += len < buflen ? 
len : buflen - 1; } - if (session->tracehandler) + if(session->tracehandler) (session->tracehandler)(session, session->tracehandler_context, buffer, msglen); else @@ -485,7 +498,7 @@ libssh2_trace(LIBSSH2_SESSION * session, int bitmask) } LIBSSH2_API int -libssh2_trace_sethandler(LIBSSH2_SESSION *session, void* handler_context, +libssh2_trace_sethandler(LIBSSH2_SESSION *session, void *handler_context, libssh2_trace_handler_func callback) { (void) session; @@ -615,21 +628,20 @@ void _libssh2_list_insert(struct list_node *after, /* insert before this */ #define _W32_FT_OFFSET (116444736000000000) int __cdecl _libssh2_gettimeofday(struct timeval *tp, void *tzp) - { - union { - unsigned __int64 ns100; /*time since 1 Jan 1601 in 100ns units */ - FILETIME ft; - } _now; - (void)tzp; - if(tp) - { - GetSystemTimeAsFileTime (&_now.ft); - tp->tv_usec=(long)((_now.ns100 / 10) % 1000000 ); - tp->tv_sec= (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000); +{ + union { + unsigned __int64 ns100; /*time since 1 Jan 1601 in 100ns units */ + FILETIME ft; + } _now; + (void)tzp; + if(tp) { + GetSystemTimeAsFileTime(&_now.ft); + tp->tv_usec = (long)((_now.ns100 / 10) % 1000000); + tp->tv_sec = (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000); } - /* Always return 0 as per Open Group Base Specifications Issue 6. - Do not set errno on error. */ - return 0; + /* Always return 0 as per Open Group Base Specifications Issue 6. + Do not set errno on error. */ + return 0; } @@ -643,3 +655,218 @@ void *_libssh2_calloc(LIBSSH2_SESSION* session, size_t size) } return p; } + +/* XOR operation on buffers input1 and input2, result in output. + It is safe to use an input buffer as the output buffer. */ +void _libssh2_xor_data(unsigned char *output, + const unsigned char *input1, + const unsigned char *input2, + size_t length) +{ + size_t i; + + for(i = 0; i < length; i++) + *output++ = *input1++ ^ *input2++; +} + +/* Increments an AES CTR buffer to prepare it for use with the + next AES block. 
*/ +void _libssh2_aes_ctr_increment(unsigned char *ctr, + size_t length) +{ + unsigned char *pc; + unsigned int val, carry; + + pc = ctr + length - 1; + carry = 1; + + while(pc >= ctr) { + val = (unsigned int)*pc + carry; + *pc-- = val & 0xFF; + carry = val >> 8; + } +} + +#ifdef WIN32 +static void * (__cdecl * const volatile memset_libssh)(void *, int, size_t) = + memset; +#else +static void * (* const volatile memset_libssh)(void *, int, size_t) = memset; +#endif + +void _libssh2_explicit_zero(void *buf, size_t size) +{ +#if defined(HAVE_DECL_SECUREZEROMEMORY) && HAVE_DECL_SECUREZEROMEMORY + SecureZeroMemory(buf, size); + (void)memset_libssh; /* Silence unused variable warning */ +#elif defined(HAVE_MEMSET_S) + (void)memset_s(buf, size, 0, size); + (void)memset_libssh; /* Silence unused variable warning */ +#else + memset_libssh(buf, 0, size); +#endif +} + +/* String buffer */ + +struct string_buf* _libssh2_string_buf_new(LIBSSH2_SESSION *session) +{ + struct string_buf *ret; + + ret = _libssh2_calloc(session, sizeof(*ret)); + if(ret == NULL) + return NULL; + + return ret; +} + +void _libssh2_string_buf_free(LIBSSH2_SESSION *session, struct string_buf *buf) +{ + if(buf == NULL) + return; + + if(buf->data != NULL) + LIBSSH2_FREE(session, buf->data); + + LIBSSH2_FREE(session, buf); + buf = NULL; +} + +int _libssh2_get_u32(struct string_buf *buf, uint32_t *out) +{ + if(!_libssh2_check_length(buf, 4)) { + return -1; + } + + *out = _libssh2_ntohu32(buf->dataptr); + buf->dataptr += 4; + return 0; +} + +int _libssh2_get_u64(struct string_buf *buf, libssh2_uint64_t *out) +{ + if(!_libssh2_check_length(buf, 8)) { + return -1; + } + + *out = _libssh2_ntohu64(buf->dataptr); + buf->dataptr += 8; + return 0; +} + +int _libssh2_match_string(struct string_buf *buf, const char *match) +{ + unsigned char *out; + size_t len = 0; + if(_libssh2_get_string(buf, &out, &len) || len != strlen(match) || + strncmp((char *)out, match, strlen(match)) != 0) { + return -1; + } + return 0; +} 
+ +int _libssh2_get_string(struct string_buf *buf, unsigned char **outbuf, + size_t *outlen) +{ + uint32_t data_len; + if(_libssh2_get_u32(buf, &data_len) != 0) { + return -1; + } + if(!_libssh2_check_length(buf, data_len)) { + return -1; + } + *outbuf = buf->dataptr; + buf->dataptr += data_len; + + if(outlen) + *outlen = (size_t)data_len; + + return 0; +} + +int _libssh2_copy_string(LIBSSH2_SESSION *session, struct string_buf *buf, + unsigned char **outbuf, size_t *outlen) +{ + size_t str_len; + unsigned char *str; + + if(_libssh2_get_string(buf, &str, &str_len)) { + return -1; + } + + *outbuf = LIBSSH2_ALLOC(session, str_len); + if(*outbuf) { + memcpy(*outbuf, str, str_len); + } + else { + return -1; + } + + if(outlen) + *outlen = str_len; + + return 0; +} + +int _libssh2_get_bignum_bytes(struct string_buf *buf, unsigned char **outbuf, + size_t *outlen) +{ + uint32_t data_len; + uint32_t bn_len; + unsigned char *bnptr; + + if(_libssh2_get_u32(buf, &data_len)) { + return -1; + } + if(!_libssh2_check_length(buf, data_len)) { + return -1; + } + + bn_len = data_len; + bnptr = buf->dataptr; + + /* trim leading zeros */ + while(bn_len > 0 && *bnptr == 0x00) { + bn_len--; + bnptr++; + } + + *outbuf = bnptr; + buf->dataptr += data_len; + + if(outlen) + *outlen = (size_t)bn_len; + + return 0; +} + +/* Given the current location in buf, _libssh2_check_length ensures + callers can read the next len number of bytes out of the buffer + before reading the buffer content */ + +int _libssh2_check_length(struct string_buf *buf, size_t len) +{ + unsigned char *endp = &buf->data[buf->len]; + size_t left = endp - buf->dataptr; + return ((len <= left) && (left <= buf->len)); +} + +/* Wrappers */ + +int _libssh2_bcrypt_pbkdf(const char *pass, + size_t passlen, + const uint8_t *salt, + size_t saltlen, + uint8_t *key, + size_t keylen, + unsigned int rounds) +{ + /* defined in bcrypt_pbkdf.c */ + return bcrypt_pbkdf(pass, + passlen, + salt, + saltlen, + key, + keylen, + rounds); +} diff 
--git a/vendor/libssh2/src/misc.h b/vendor/libssh2/src/misc.h index 54ae5461d6..5481e666ca 100644 --- a/vendor/libssh2/src/misc.h +++ b/vendor/libssh2/src/misc.h @@ -1,6 +1,6 @@ #ifndef __LIBSSH2_MISC_H #define __LIBSSH2_MISC_H -/* Copyright (c) 2009-2014 by Daniel Stenberg +/* Copyright (c) 2009-2019 by Daniel Stenberg * * All rights reserved. * @@ -49,8 +49,15 @@ struct list_node { struct list_head *head; }; -int _libssh2_error_flags(LIBSSH2_SESSION* session, int errcode, const char* errmsg, int errflags); -int _libssh2_error(LIBSSH2_SESSION* session, int errcode, const char* errmsg); +struct string_buf { + unsigned char *data; + unsigned char *dataptr; + size_t len; +}; + +int _libssh2_error_flags(LIBSSH2_SESSION* session, int errcode, + const char *errmsg, int errflags); +int _libssh2_error(LIBSSH2_SESSION* session, int errcode, const char *errmsg); void _libssh2_list_init(struct list_head *head); @@ -70,7 +77,7 @@ void *_libssh2_list_prev(struct list_node *node); /* remove this node from the list */ void _libssh2_list_remove(struct list_node *entry); -size_t _libssh2_base64_encode(struct _LIBSSH2_SESSION *session, +size_t _libssh2_base64_encode(LIBSSH2_SESSION *session, const char *inp, size_t insize, char **outptr); unsigned int _libssh2_ntohu32(const unsigned char *buf); @@ -78,7 +85,22 @@ libssh2_uint64_t _libssh2_ntohu64(const unsigned char *buf); void _libssh2_htonu32(unsigned char *buf, uint32_t val); void _libssh2_store_u32(unsigned char **buf, uint32_t value); void _libssh2_store_str(unsigned char **buf, const char *str, size_t len); -void *_libssh2_calloc(LIBSSH2_SESSION* session, size_t size); +void *_libssh2_calloc(LIBSSH2_SESSION *session, size_t size); +void _libssh2_explicit_zero(void *buf, size_t size); + +struct string_buf* _libssh2_string_buf_new(LIBSSH2_SESSION *session); +void _libssh2_string_buf_free(LIBSSH2_SESSION *session, + struct string_buf *buf); +int _libssh2_get_u32(struct string_buf *buf, uint32_t *out); +int 
_libssh2_get_u64(struct string_buf *buf, libssh2_uint64_t *out); +int _libssh2_match_string(struct string_buf *buf, const char *match); +int _libssh2_get_string(struct string_buf *buf, unsigned char **outbuf, + size_t *outlen); +int _libssh2_copy_string(LIBSSH2_SESSION* session, struct string_buf *buf, + unsigned char **outbuf, size_t *outlen); +int _libssh2_get_bignum_bytes(struct string_buf *buf, unsigned char **outbuf, + size_t *outlen); +int _libssh2_check_length(struct string_buf *buf, size_t requested_len); #if defined(LIBSSH2_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) /* provide a private one */ @@ -93,4 +115,11 @@ int __cdecl _libssh2_gettimeofday(struct timeval *tp, void *tzp); #endif #endif +void _libssh2_xor_data(unsigned char *output, + const unsigned char *input1, + const unsigned char *input2, + size_t length); + +void _libssh2_aes_ctr_increment(unsigned char *ctr, size_t length); + #endif /* _LIBSSH2_MISC_H */ diff --git a/vendor/libssh2/src/openssl.c b/vendor/libssh2/src/openssl.c index 4f63ef92bc..72a85b3b63 100644 --- a/vendor/libssh2/src/openssl.c +++ b/vendor/libssh2/src/openssl.c @@ -43,11 +43,37 @@ #ifdef LIBSSH2_OPENSSL /* compile only if we build with openssl */ #include +#include "misc.h" #ifndef EVP_MAX_BLOCK_LENGTH #define EVP_MAX_BLOCK_LENGTH 32 #endif +int +read_openssh_private_key_from_memory(void **key_ctx, LIBSSH2_SESSION *session, + const char *key_type, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase); + +static unsigned char * +write_bn(unsigned char *buf, const BIGNUM *bn, int bn_bytes) +{ + unsigned char *p = buf; + + /* Left space for bn size which will be written below. */ + p += 4; + + *p = 0; + BN_bn2bin(bn, p + 1); + if(!(*(p + 1) & 0x80)) { + memmove(p, p + 1, --bn_bytes); + } + _libssh2_htonu32(p - 4, bn_bytes); /* Post write bn size. 
*/ + + return p + bn_bytes; +} + int _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, const unsigned char *edata, @@ -81,7 +107,7 @@ _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, n = BN_new(); BN_bin2bn(ndata, nlen, n); - if (ddata) { + if(ddata) { d = BN_new(); BN_bin2bn(ddata, dlen, d); @@ -107,6 +133,7 @@ _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, #else (*rsa)->e = e; (*rsa)->n = n; + (*rsa)->d = d; #endif #ifdef HAVE_OPAQUE_STRUCTS @@ -127,21 +154,57 @@ _libssh2_rsa_new(libssh2_rsa_ctx ** rsa, } int -_libssh2_rsa_sha1_verify(libssh2_rsa_ctx * rsactx, +_libssh2_rsa_sha2_verify(libssh2_rsa_ctx * rsactx, + size_t hash_len, const unsigned char *sig, unsigned long sig_len, const unsigned char *m, unsigned long m_len) { - unsigned char hash[SHA_DIGEST_LENGTH]; int ret; + int nid_type; + unsigned char *hash = malloc(hash_len); + if(hash == NULL) + return -1; + + if(hash_len == SHA_DIGEST_LENGTH) { + nid_type = NID_sha1; + ret = _libssh2_sha1(m, m_len, hash); + } + else if(hash_len == SHA256_DIGEST_LENGTH) { + nid_type = NID_sha256; + ret = _libssh2_sha256(m, m_len, hash); - if (_libssh2_sha1(m, m_len, hash)) + } + else if(hash_len == SHA512_DIGEST_LENGTH) { + nid_type = NID_sha512; + ret = _libssh2_sha512(m, m_len, hash); + } + else + ret = -1; /* unsupported digest */ + + if(ret != 0) { + free(hash); return -1; /* failure */ - ret = RSA_verify(NID_sha1, hash, SHA_DIGEST_LENGTH, + } + + ret = RSA_verify(nid_type, hash, hash_len, (unsigned char *) sig, sig_len, rsactx); + + free(hash); + return (ret == 1) ? 
0 : -1; } +int +_libssh2_rsa_sha1_verify(libssh2_rsa_ctx * rsactx, + const unsigned char *sig, + unsigned long sig_len, + const unsigned char *m, unsigned long m_len) +{ + return _libssh2_rsa_sha2_verify(rsactx, SHA_DIGEST_LENGTH, sig, sig_len, m, + m_len); +} + #if LIBSSH2_DSA int _libssh2_dsa_new(libssh2_dsa_ctx ** dsactx, @@ -173,7 +236,7 @@ _libssh2_dsa_new(libssh2_dsa_ctx ** dsactx, pub_key = BN_new(); BN_bin2bn(y, y_len, pub_key); - if (x_len) { + if(x_len) { priv_key = BN_new(); BN_bin2bn(x, x_len, priv_key); } @@ -220,7 +283,7 @@ _libssh2_dsa_sha1_verify(libssh2_dsa_ctx * dsactx, dsasig->r = r; dsasig->s = s; #endif - if (!_libssh2_sha1(m, m_len, hash)) + if(!_libssh2_sha1(m, m_len, hash)) /* _libssh2_sha1() succeeded */ ret = DSA_do_verify(hash, SHA_DIGEST_LENGTH, dsasig, dsactx); @@ -230,6 +293,147 @@ _libssh2_dsa_sha1_verify(libssh2_dsa_ctx * dsactx, } #endif /* LIBSSH_DSA */ +#if LIBSSH2_ECDSA + +/* _libssh2_ecdsa_get_curve_type + * + * returns key curve type that maps to libssh2_curve_type + * + */ + +libssh2_curve_type +_libssh2_ecdsa_get_curve_type(libssh2_ecdsa_ctx *ec_ctx) +{ + const EC_GROUP *group = EC_KEY_get0_group(ec_ctx); + return EC_GROUP_get_curve_name(group); +} + +/* _libssh2_ecdsa_curve_type_from_name + * + * returns 0 for success, key curve type that maps to libssh2_curve_type + * + */ + +int +_libssh2_ecdsa_curve_type_from_name(const char *name, + libssh2_curve_type *out_type) +{ + int ret = 0; + libssh2_curve_type type; + + if(name == NULL || strlen(name) != 19) + return -1; + + if(strcmp(name, "ecdsa-sha2-nistp256") == 0) + type = LIBSSH2_EC_CURVE_NISTP256; + else if(strcmp(name, "ecdsa-sha2-nistp384") == 0) + type = LIBSSH2_EC_CURVE_NISTP384; + else if(strcmp(name, "ecdsa-sha2-nistp521") == 0) + type = LIBSSH2_EC_CURVE_NISTP521; + else { + ret = -1; + } + + if(ret == 0 && out_type) { + *out_type = type; + } + + return ret; +} + +/* _libssh2_ecdsa_curve_name_with_octal_new + * + * Creates a new public key given an octal string, 
length and type + * + */ + +int +_libssh2_ecdsa_curve_name_with_octal_new(libssh2_ecdsa_ctx ** ec_ctx, + const unsigned char *k, + size_t k_len, libssh2_curve_type curve) +{ + + int ret = 0; + const EC_GROUP *ec_group = NULL; + EC_KEY *ec_key = EC_KEY_new_by_curve_name(curve); + EC_POINT *point = NULL; + + if(ec_key) { + ec_group = EC_KEY_get0_group(ec_key); + point = EC_POINT_new(ec_group); + ret = EC_POINT_oct2point(ec_group, point, k, k_len, NULL); + ret = EC_KEY_set_public_key(ec_key, point); + + if(point != NULL) + EC_POINT_free(point); + + if(ec_ctx != NULL) + *ec_ctx = ec_key; + } + + return (ret == 1) ? 0 : -1; +} + +#define LIBSSH2_ECDSA_VERIFY(digest_type) \ +{ \ + unsigned char hash[SHA##digest_type##_DIGEST_LENGTH]; \ + libssh2_sha##digest_type(m, m_len, hash); \ + ret = ECDSA_do_verify(hash, SHA##digest_type##_DIGEST_LENGTH, \ + ecdsa_sig, ec_key); \ + \ +} + +int +_libssh2_ecdsa_verify(libssh2_ecdsa_ctx * ctx, + const unsigned char *r, size_t r_len, + const unsigned char *s, size_t s_len, + const unsigned char *m, size_t m_len) +{ + int ret = 0; + EC_KEY *ec_key = (EC_KEY*)ctx; + libssh2_curve_type type = _libssh2_ecdsa_get_curve_type(ec_key); + +#ifdef HAVE_OPAQUE_STRUCTS + ECDSA_SIG *ecdsa_sig = ECDSA_SIG_new(); + BIGNUM *pr = BN_new(); + BIGNUM *ps = BN_new(); + + BN_bin2bn(r, r_len, pr); + BN_bin2bn(s, s_len, ps); + ECDSA_SIG_set0(ecdsa_sig, pr, ps); + +#else + ECDSA_SIG ecdsa_sig_; + ECDSA_SIG *ecdsa_sig = &ecdsa_sig_; + ecdsa_sig_.r = BN_new(); + BN_bin2bn(r, r_len, ecdsa_sig_.r); + ecdsa_sig_.s = BN_new(); + BN_bin2bn(s, s_len, ecdsa_sig_.s); +#endif + + if(type == LIBSSH2_EC_CURVE_NISTP256) { + LIBSSH2_ECDSA_VERIFY(256); + } + else if(type == LIBSSH2_EC_CURVE_NISTP384) { + LIBSSH2_ECDSA_VERIFY(384); + } + else if(type == LIBSSH2_EC_CURVE_NISTP521) { + LIBSSH2_ECDSA_VERIFY(512); + } + +#ifdef HAVE_OPAQUE_STRUCTS + if(ecdsa_sig) + ECDSA_SIG_free(ecdsa_sig); +#else + BN_clear_free(ecdsa_sig_.s); + BN_clear_free(ecdsa_sig_.r); +#endif + + return 
(ret == 1) ? 0 : -1; +} + +#endif /* LIBSSH2_ECDSA */ + int _libssh2_cipher_init(_libssh2_cipher_ctx * h, _libssh2_cipher_type(algo), @@ -259,10 +463,19 @@ _libssh2_cipher_crypt(_libssh2_cipher_ctx * ctx, #else ret = EVP_Cipher(ctx, buf, block, blocksize); #endif - if (ret == 1) { +#if defined(OPENSSL_VERSION_MAJOR) && OPENSSL_VERSION_MAJOR >= 3 + if(ret != -1) { +#else + if(ret == 1) { +#endif memcpy(block, buf, blocksize); } + +#if defined(OPENSSL_VERSION_MAJOR) && OPENSSL_VERSION_MAJOR >= 3 + return ret != -1 ? 0 : 1; +#else return ret == 1 ? 0 : 1; +#endif } #if LIBSSH2_AES_CTR && !defined(HAVE_EVP_AES_128_CTR) @@ -277,6 +490,10 @@ typedef struct unsigned char ctr[AES_BLOCK_SIZE]; } aes_ctr_ctx; +static EVP_CIPHER * aes_128_ctr_cipher = NULL; +static EVP_CIPHER * aes_192_ctr_cipher = NULL; +static EVP_CIPHER * aes_256_ctr_cipher = NULL; + static int aes_ctr_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc) /* init key */ @@ -289,7 +506,7 @@ aes_ctr_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const EVP_CIPHER *aes_cipher; (void) enc; - switch (EVP_CIPHER_CTX_key_length(ctx)) { + switch(EVP_CIPHER_CTX_key_length(ctx)) { case 16: aes_cipher = EVP_aes_128_ecb(); break; @@ -304,7 +521,7 @@ aes_ctr_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, } c = malloc(sizeof(*c)); - if (c == NULL) + if(c == NULL) return 0; #ifdef HAVE_OPAQUE_STRUCTS @@ -312,12 +529,12 @@ aes_ctr_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, #else c->aes_ctx = malloc(sizeof(EVP_CIPHER_CTX)); #endif - if (c->aes_ctx == NULL) { + if(c->aes_ctx == NULL) { free(c); return 0; } - if (EVP_EncryptInit(c->aes_ctx, aes_cipher, key, NULL) != 1) { + if(EVP_EncryptInit(c->aes_ctx, aes_cipher, key, NULL) != 1) { #ifdef HAVE_OPAQUE_STRUCTS EVP_CIPHER_CTX_free(c->aes_ctx); #else @@ -343,13 +560,12 @@ aes_ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, { aes_ctr_ctx *c = EVP_CIPHER_CTX_get_app_data(ctx); unsigned char b1[AES_BLOCK_SIZE]; - size_t i = 
0; int outlen = 0; - if (inl != 16) /* libssh2 only ever encrypt one block */ + if(inl != 16) /* libssh2 only ever encrypt one block */ return 0; - if (c == NULL) { + if(c == NULL) { return 0; } @@ -360,19 +576,13 @@ aes_ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, the ciphertext block C1. The counter X is then incremented */ - if (EVP_EncryptUpdate(c->aes_ctx, b1, &outlen, c->ctr, AES_BLOCK_SIZE) != 1) { + if(EVP_EncryptUpdate(c->aes_ctx, b1, &outlen, + c->ctr, AES_BLOCK_SIZE) != 1) { return 0; } - for (i = 0; i < 16; i++) - *out++ = *in++ ^ b1[i]; - - i = 15; - while (c->ctr[i]++ == 0xFF) { - if (i == 0) - break; - i--; - } + _libssh2_xor_data(out, in, b1, AES_BLOCK_SIZE); + _libssh2_aes_ctr_increment(c->ctr, AES_BLOCK_SIZE); return 1; } @@ -382,11 +592,11 @@ aes_ctr_cleanup(EVP_CIPHER_CTX *ctx) /* cleanup ctx */ { aes_ctr_ctx *c = EVP_CIPHER_CTX_get_app_data(ctx); - if (c == NULL) { + if(c == NULL) { return 1; } - if (c->aes_ctx != NULL) { + if(c->aes_ctx != NULL) { #ifdef HAVE_OPAQUE_STRUCTS EVP_CIPHER_CTX_free(c->aes_ctx); #else @@ -401,40 +611,43 @@ aes_ctr_cleanup(EVP_CIPHER_CTX *ctx) /* cleanup ctx */ } static const EVP_CIPHER * -make_ctr_evp (size_t keylen, EVP_CIPHER *aes_ctr_cipher, int type) +make_ctr_evp (size_t keylen, EVP_CIPHER **aes_ctr_cipher, int type) { #ifdef HAVE_OPAQUE_STRUCTS - aes_ctr_cipher = EVP_CIPHER_meth_new(type, 16, keylen); - if (aes_ctr_cipher) { - EVP_CIPHER_meth_set_iv_length(aes_ctr_cipher, 16); - EVP_CIPHER_meth_set_init(aes_ctr_cipher, aes_ctr_init); - EVP_CIPHER_meth_set_do_cipher(aes_ctr_cipher, aes_ctr_do_cipher); - EVP_CIPHER_meth_set_cleanup(aes_ctr_cipher, aes_ctr_cleanup); + *aes_ctr_cipher = EVP_CIPHER_meth_new(type, 16, keylen); + if(*aes_ctr_cipher) { + EVP_CIPHER_meth_set_iv_length(*aes_ctr_cipher, 16); + EVP_CIPHER_meth_set_init(*aes_ctr_cipher, aes_ctr_init); + EVP_CIPHER_meth_set_do_cipher(*aes_ctr_cipher, aes_ctr_do_cipher); + EVP_CIPHER_meth_set_cleanup(*aes_ctr_cipher, aes_ctr_cleanup); } #else - 
aes_ctr_cipher->nid = type; - aes_ctr_cipher->block_size = 16; - aes_ctr_cipher->key_len = keylen; - aes_ctr_cipher->iv_len = 16; - aes_ctr_cipher->init = aes_ctr_init; - aes_ctr_cipher->do_cipher = aes_ctr_do_cipher; - aes_ctr_cipher->cleanup = aes_ctr_cleanup; + (*aes_ctr_cipher)->nid = type; + (*aes_ctr_cipher)->block_size = 16; + (*aes_ctr_cipher)->key_len = keylen; + (*aes_ctr_cipher)->iv_len = 16; + (*aes_ctr_cipher)->init = aes_ctr_init; + (*aes_ctr_cipher)->do_cipher = aes_ctr_do_cipher; + (*aes_ctr_cipher)->cleanup = aes_ctr_cleanup; #endif - return aes_ctr_cipher; + return *aes_ctr_cipher; } const EVP_CIPHER * _libssh2_EVP_aes_128_ctr(void) { #ifdef HAVE_OPAQUE_STRUCTS - static EVP_CIPHER * aes_ctr_cipher; - return !aes_ctr_cipher? - make_ctr_evp (16, aes_ctr_cipher, NID_aes_128_ctr) : aes_ctr_cipher; + return !aes_128_ctr_cipher ? + make_ctr_evp(16, &aes_128_ctr_cipher, NID_aes_128_ctr) : + aes_128_ctr_cipher; #else static EVP_CIPHER aes_ctr_cipher; - return !aes_ctr_cipher.key_len? - make_ctr_evp (16, &aes_ctr_cipher, 0) : &aes_ctr_cipher; + if(!aes_128_ctr_cipher) { + aes_128_ctr_cipher = &aes_ctr_cipher; + make_ctr_evp(16, &aes_128_ctr_cipher, 0); + } + return aes_128_ctr_cipher; #endif } @@ -442,13 +655,16 @@ const EVP_CIPHER * _libssh2_EVP_aes_192_ctr(void) { #ifdef HAVE_OPAQUE_STRUCTS - static EVP_CIPHER * aes_ctr_cipher; - return !aes_ctr_cipher? - make_ctr_evp (24, aes_ctr_cipher, NID_aes_192_ctr) : aes_ctr_cipher; + return !aes_192_ctr_cipher ? + make_ctr_evp(24, &aes_192_ctr_cipher, NID_aes_192_ctr) : + aes_192_ctr_cipher; #else static EVP_CIPHER aes_ctr_cipher; - return !aes_ctr_cipher.key_len? 
- make_ctr_evp (24, &aes_ctr_cipher, 0) : &aes_ctr_cipher; + if(!aes_192_ctr_cipher) { + aes_192_ctr_cipher = &aes_ctr_cipher; + make_ctr_evp(24, &aes_192_ctr_cipher, 0); + } + return aes_192_ctr_cipher; #endif } @@ -456,26 +672,67 @@ const EVP_CIPHER * _libssh2_EVP_aes_256_ctr(void) { #ifdef HAVE_OPAQUE_STRUCTS - static EVP_CIPHER * aes_ctr_cipher; - return !aes_ctr_cipher? - make_ctr_evp (32, aes_ctr_cipher, NID_aes_256_ctr) : aes_ctr_cipher; + return !aes_256_ctr_cipher ? + make_ctr_evp(32, &aes_256_ctr_cipher, NID_aes_256_ctr) : + aes_256_ctr_cipher; #else static EVP_CIPHER aes_ctr_cipher; - return !aes_ctr_cipher.key_len? - make_ctr_evp (32, &aes_ctr_cipher, 0) : &aes_ctr_cipher; + if(!aes_256_ctr_cipher) { + aes_256_ctr_cipher = &aes_ctr_cipher; + make_ctr_evp(32, &aes_256_ctr_cipher, 0); + } + return aes_256_ctr_cipher; #endif } -void _libssh2_init_aes_ctr(void) +#endif /* LIBSSH2_AES_CTR && !defined(HAVE_EVP_AES_128_CTR) */ + +void _libssh2_openssl_crypto_init(void) { - _libssh2_EVP_aes_128_ctr(); - _libssh2_EVP_aes_192_ctr(); - _libssh2_EVP_aes_256_ctr(); +#if OPENSSL_VERSION_NUMBER >= 0x10100000L && \ + !defined(LIBRESSL_VERSION_NUMBER) +#ifndef OPENSSL_NO_ENGINE + ENGINE_load_builtin_engines(); + ENGINE_register_all_complete(); +#endif +#else + OpenSSL_add_all_algorithms(); + OpenSSL_add_all_ciphers(); + OpenSSL_add_all_digests(); +#ifndef OPENSSL_NO_ENGINE + ENGINE_load_builtin_engines(); + ENGINE_register_all_complete(); +#endif +#endif +#if LIBSSH2_AES_CTR && !defined(HAVE_EVP_AES_128_CTR) + aes_128_ctr_cipher = (EVP_CIPHER *) _libssh2_EVP_aes_128_ctr(); + aes_192_ctr_cipher = (EVP_CIPHER *) _libssh2_EVP_aes_192_ctr(); + aes_256_ctr_cipher = (EVP_CIPHER *) _libssh2_EVP_aes_256_ctr(); +#endif } -#else -void _libssh2_init_aes_ctr(void) {} -#endif /* LIBSSH2_AES_CTR */ +void _libssh2_openssl_crypto_exit(void) +{ +#if LIBSSH2_AES_CTR && !defined(HAVE_EVP_AES_128_CTR) +#ifdef HAVE_OPAQUE_STRUCTS + if(aes_128_ctr_cipher) { + 
EVP_CIPHER_meth_free(aes_128_ctr_cipher); + } + + if(aes_192_ctr_cipher) { + EVP_CIPHER_meth_free(aes_192_ctr_cipher); + } + + if(aes_256_ctr_cipher) { + EVP_CIPHER_meth_free(aes_256_ctr_cipher); + } +#endif + + aes_128_ctr_cipher = NULL; + aes_192_ctr_cipher = NULL; + aes_256_ctr_cipher = NULL; +#endif +} /* TODO: Optionally call a passphrase callback specified by the * calling program @@ -486,7 +743,7 @@ passphrase_cb(char *buf, int size, int rwflag, char *passphrase) int passphrase_len = strlen(passphrase); (void) rwflag; - if (passphrase_len > (size - 1)) { + if(passphrase_len > (size - 1)) { passphrase_len = size - 1; } memcpy(buf, passphrase, passphrase_len); @@ -496,12 +753,12 @@ passphrase_cb(char *buf, int size, int rwflag, char *passphrase) } typedef void * (*pem_read_bio_func)(BIO *, void **, pem_password_cb *, - void * u); + void *u); static int -read_private_key_from_memory(void ** key_ctx, +read_private_key_from_memory(void **key_ctx, pem_read_bio_func read_private_key, - const char * filedata, + const char *filedata, size_t filedata_len, unsigned const char *passphrase) { @@ -510,7 +767,7 @@ read_private_key_from_memory(void ** key_ctx, *key_ctx = NULL; bp = BIO_new_mem_buf((char *)filedata, filedata_len); - if (!bp) { + if(!bp) { return -1; } *key_ctx = read_private_key(bp, NULL, (pem_password_cb *) passphrase_cb, @@ -520,10 +777,12 @@ read_private_key_from_memory(void ** key_ctx, return (*key_ctx) ? 
0 : -1; } + + static int -read_private_key_from_file(void ** key_ctx, +read_private_key_from_file(void **key_ctx, pem_read_bio_func read_private_key, - const char * filename, + const char *filename, unsigned const char *passphrase) { BIO * bp; @@ -531,7 +790,7 @@ read_private_key_from_file(void ** key_ctx, *key_ctx = NULL; bp = BIO_new_file(filename, "r"); - if (!bp) { + if(!bp) { return -1; } @@ -548,109 +807,1178 @@ _libssh2_rsa_new_private_frommemory(libssh2_rsa_ctx ** rsa, const char *filedata, size_t filedata_len, unsigned const char *passphrase) { + int rc; + pem_read_bio_func read_rsa = (pem_read_bio_func) &PEM_read_bio_RSAPrivateKey; - (void) session; _libssh2_init_if_needed(); - return read_private_key_from_memory((void **) rsa, read_rsa, - filedata, filedata_len, passphrase); -} - -int -_libssh2_rsa_new_private(libssh2_rsa_ctx ** rsa, - LIBSSH2_SESSION * session, - const char *filename, unsigned const char *passphrase) -{ - pem_read_bio_func read_rsa = - (pem_read_bio_func) &PEM_read_bio_RSAPrivateKey; - (void) session; + rc = read_private_key_from_memory((void **) rsa, read_rsa, + filedata, filedata_len, passphrase); - _libssh2_init_if_needed (); + if(rc) { + rc = read_openssh_private_key_from_memory((void **)rsa, session, + "ssh-rsa", filedata, filedata_len, passphrase); + } - return read_private_key_from_file((void **) rsa, read_rsa, - filename, passphrase); +return rc; } -#if LIBSSH2_DSA -int -_libssh2_dsa_new_private_frommemory(libssh2_dsa_ctx ** dsa, - LIBSSH2_SESSION * session, - const char *filedata, size_t filedata_len, - unsigned const char *passphrase) +static unsigned char * +gen_publickey_from_rsa(LIBSSH2_SESSION *session, RSA *rsa, + size_t *key_len) { - pem_read_bio_func read_dsa = - (pem_read_bio_func) &PEM_read_bio_DSAPrivateKey; - (void) session; + int e_bytes, n_bytes; + unsigned long len; + unsigned char *key; + unsigned char *p; + const BIGNUM * e; + const BIGNUM * n; +#ifdef HAVE_OPAQUE_STRUCTS + RSA_get0_key(rsa, &n, &e, NULL); 
+#else + e = rsa->e; + n = rsa->n; +#endif + e_bytes = BN_num_bytes(e) + 1; + n_bytes = BN_num_bytes(n) + 1; - _libssh2_init_if_needed(); + /* Key form is "ssh-rsa" + e + n. */ + len = 4 + 7 + 4 + e_bytes + 4 + n_bytes; - return read_private_key_from_memory((void **) dsa, read_dsa, - filedata, filedata_len, passphrase); -} + key = LIBSSH2_ALLOC(session, len); + if(key == NULL) { + return NULL; + } -int -_libssh2_dsa_new_private(libssh2_dsa_ctx ** dsa, - LIBSSH2_SESSION * session, - const char *filename, unsigned const char *passphrase) -{ - pem_read_bio_func read_dsa = - (pem_read_bio_func) &PEM_read_bio_DSAPrivateKey; - (void) session; + /* Process key encoding. */ + p = key; + + _libssh2_htonu32(p, 7); /* Key type. */ + p += 4; + memcpy(p, "ssh-rsa", 7); + p += 7; - _libssh2_init_if_needed (); + p = write_bn(p, e, e_bytes); + p = write_bn(p, n, n_bytes); - return read_private_key_from_file((void **) dsa, read_dsa, - filename, passphrase); + *key_len = (size_t)(p - key); + return key; } -#endif /* LIBSSH_DSA */ -int -_libssh2_rsa_sha1_sign(LIBSSH2_SESSION * session, - libssh2_rsa_ctx * rsactx, - const unsigned char *hash, - size_t hash_len, - unsigned char **signature, size_t *signature_len) +static int +gen_publickey_from_rsa_evp(LIBSSH2_SESSION *session, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + EVP_PKEY *pk) { - int ret; - unsigned char *sig; - unsigned int sig_len; + RSA* rsa = NULL; + unsigned char *key; + unsigned char *method_buf = NULL; + size_t key_len; - sig_len = RSA_size(rsactx); - sig = LIBSSH2_ALLOC(session, sig_len); + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing public key from RSA private key envelope"); - if (!sig) { - return -1; + rsa = EVP_PKEY_get1_RSA(pk); + if(rsa == NULL) { + /* Assume memory allocation error... what else could it be ? 
*/ + goto __alloc_error; } - ret = RSA_sign(NID_sha1, hash, hash_len, sig, &sig_len, rsactx); - - if (!ret) { - LIBSSH2_FREE(session, sig); - return -1; + method_buf = LIBSSH2_ALLOC(session, 7); /* ssh-rsa. */ + if(method_buf == NULL) { + goto __alloc_error; } - *signature = sig; - *signature_len = sig_len; + key = gen_publickey_from_rsa(session, rsa, &key_len); + if(key == NULL) { + goto __alloc_error; + } + RSA_free(rsa); + memcpy(method_buf, "ssh-rsa", 7); + *method = method_buf; + *method_len = 7; + *pubkeydata = key; + *pubkeydata_len = key_len; return 0; + + __alloc_error: + if(rsa != NULL) { + RSA_free(rsa); + } + if(method_buf != NULL) { + LIBSSH2_FREE(session, method_buf); + } + + return _libssh2_error(session, + LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); } -#if LIBSSH2_DSA -int -_libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, - const unsigned char *hash, - unsigned long hash_len, unsigned char *signature) +static int _libssh2_rsa_new_additional_parameters(RSA *rsa) { - DSA_SIG *sig; - const BIGNUM * r; - const BIGNUM * s; - int r_len, s_len; - (void) hash_len; + BN_CTX *ctx = NULL; + BIGNUM *aux = NULL; + BIGNUM *dmp1 = NULL; + BIGNUM *dmq1 = NULL; + const BIGNUM *p = NULL; + const BIGNUM *q = NULL; + const BIGNUM *d = NULL; + int rc = 0; - sig = DSA_do_sign(hash, SHA_DIGEST_LENGTH, dsactx); - if (!sig) { +#ifdef HAVE_OPAQUE_STRUCTS + RSA_get0_key(rsa, NULL, NULL, &d); + RSA_get0_factors(rsa, &p, &q); +#else + d = (*rsa).d; + p = (*rsa).p; + q = (*rsa).q; +#endif + + ctx = BN_CTX_new(); + if(ctx == NULL) + return -1; + + aux = BN_new(); + if(aux == NULL) { + rc = -1; + goto out; + } + + dmp1 = BN_new(); + if(dmp1 == NULL) { + rc = -1; + goto out; + } + + dmq1 = BN_new(); + if(dmq1 == NULL) { + rc = -1; + goto out; + } + + if((BN_sub(aux, q, BN_value_one()) == 0) || + (BN_mod(dmq1, d, aux, ctx) == 0) || + (BN_sub(aux, p, BN_value_one()) == 0) || + (BN_mod(dmp1, d, aux, ctx) == 0)) { + rc = -1; + goto out; + } + +#ifdef 
HAVE_OPAQUE_STRUCTS + RSA_set0_crt_params(rsa, dmp1, dmq1, NULL); +#else + (*rsa).dmp1 = dmp1; + (*rsa).dmq1 = dmq1; +#endif + +out: + if(aux) + BN_clear_free(aux); + BN_CTX_free(ctx); + + if(rc != 0) { + if(dmp1) + BN_clear_free(dmp1); + if(dmq1) + BN_clear_free(dmq1); + } + + return rc; +} + +static int +gen_publickey_from_rsa_openssh_priv_data(LIBSSH2_SESSION *session, + struct string_buf *decrypted, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + libssh2_rsa_ctx **rsa_ctx) +{ + int rc = 0; + size_t nlen, elen, dlen, plen, qlen, coefflen, commentlen; + unsigned char *n, *e, *d, *p, *q, *coeff, *comment; + RSA *rsa = NULL; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing RSA keys from private key data"); + + /* public key data */ + if(_libssh2_get_bignum_bytes(decrypted, &n, &nlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no n"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &e, &elen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no e"); + return -1; + } + + /* private key data */ + if(_libssh2_get_bignum_bytes(decrypted, &d, &dlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no d"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &coeff, &coefflen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no coeff"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &p, &plen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no p"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &q, &qlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no q"); + return -1; + } + + if(_libssh2_get_string(decrypted, &comment, &commentlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "RSA no comment"); + return -1; + } + + if((rc = _libssh2_rsa_new(&rsa, e, elen, n, nlen, d, dlen, p, plen, + q, qlen, NULL, 0, NULL, 0, + coeff, coefflen)) != 0) { + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + 
"Could not create RSA private key"); + goto fail; + } + + if(rsa != NULL) + rc = _libssh2_rsa_new_additional_parameters(rsa); + + if(rsa != NULL && pubkeydata != NULL && method != NULL) { + EVP_PKEY *pk = EVP_PKEY_new(); + EVP_PKEY_set1_RSA(pk, rsa); + + rc = gen_publickey_from_rsa_evp(session, method, method_len, + pubkeydata, pubkeydata_len, + pk); + + if(pk) + EVP_PKEY_free(pk); + } + + if(rsa_ctx != NULL) + *rsa_ctx = rsa; + else + RSA_free(rsa); + + return rc; + +fail: + + if(rsa != NULL) + RSA_free(rsa); + + return _libssh2_error(session, + LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); +} + +static int +_libssh2_rsa_new_openssh_private(libssh2_rsa_ctx ** rsa, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase) +{ + FILE *fp; + int rc; + unsigned char *buf = NULL; + struct string_buf *decrypted = NULL; + + if(session == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + return -1; + } + + _libssh2_init_if_needed(); + + fp = fopen(filename, "r"); + if(!fp) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to open OpenSSH RSA private key file"); + return -1; + } + + rc = _libssh2_openssh_pem_parse(session, passphrase, fp, &decrypted); + fclose(fp); + if(rc) { + return rc; + } + + /* We have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted key data not found"); + return -1; + } + + if(strcmp("ssh-rsa", (const char *)buf) == 0) { + rc = gen_publickey_from_rsa_openssh_priv_data(session, decrypted, + NULL, 0, + NULL, 0, rsa); + } + else { + rc = -1; + } + + if(decrypted) + _libssh2_string_buf_free(session, decrypted); + + return rc; +} + +int +_libssh2_rsa_new_private(libssh2_rsa_ctx ** rsa, + LIBSSH2_SESSION * session, + const char *filename, unsigned const char *passphrase) +{ + int rc; + 
+ pem_read_bio_func read_rsa = + (pem_read_bio_func) &PEM_read_bio_RSAPrivateKey; + + _libssh2_init_if_needed(); + + rc = read_private_key_from_file((void **) rsa, read_rsa, + filename, passphrase); + + if(rc) { + rc = _libssh2_rsa_new_openssh_private(rsa, session, + filename, passphrase); + } + + return rc; +} + +#if LIBSSH2_DSA +int +_libssh2_dsa_new_private_frommemory(libssh2_dsa_ctx ** dsa, + LIBSSH2_SESSION * session, + const char *filedata, size_t filedata_len, + unsigned const char *passphrase) +{ + int rc; + + pem_read_bio_func read_dsa = + (pem_read_bio_func) &PEM_read_bio_DSAPrivateKey; + + _libssh2_init_if_needed(); + + rc = read_private_key_from_memory((void **)dsa, read_dsa, + filedata, filedata_len, passphrase); + + if(rc) { + rc = read_openssh_private_key_from_memory((void **)dsa, session, + "ssh-dsa", filedata, filedata_len, passphrase); + } + + return rc; +} + +static unsigned char * +gen_publickey_from_dsa(LIBSSH2_SESSION* session, DSA *dsa, + size_t *key_len) +{ + int p_bytes, q_bytes, g_bytes, k_bytes; + unsigned long len; + unsigned char *key; + unsigned char *p; + + const BIGNUM * p_bn; + const BIGNUM * q; + const BIGNUM * g; + const BIGNUM * pub_key; +#ifdef HAVE_OPAQUE_STRUCTS + DSA_get0_pqg(dsa, &p_bn, &q, &g); +#else + p_bn = dsa->p; + q = dsa->q; + g = dsa->g; +#endif + +#ifdef HAVE_OPAQUE_STRUCTS + DSA_get0_key(dsa, &pub_key, NULL); +#else + pub_key = dsa->pub_key; +#endif + p_bytes = BN_num_bytes(p_bn) + 1; + q_bytes = BN_num_bytes(q) + 1; + g_bytes = BN_num_bytes(g) + 1; + k_bytes = BN_num_bytes(pub_key) + 1; + + /* Key form is "ssh-dss" + p + q + g + pub_key. */ + len = 4 + 7 + 4 + p_bytes + 4 + q_bytes + 4 + g_bytes + 4 + k_bytes; + + key = LIBSSH2_ALLOC(session, len); + if(key == NULL) { + return NULL; + } + + /* Process key encoding. */ + p = key; + + _libssh2_htonu32(p, 7); /* Key type. 
*/ + p += 4; + memcpy(p, "ssh-dss", 7); + p += 7; + + p = write_bn(p, p_bn, p_bytes); + p = write_bn(p, q, q_bytes); + p = write_bn(p, g, g_bytes); + p = write_bn(p, pub_key, k_bytes); + + *key_len = (size_t)(p - key); + return key; +} + +static int +gen_publickey_from_dsa_evp(LIBSSH2_SESSION *session, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + EVP_PKEY *pk) +{ + DSA* dsa = NULL; + unsigned char *key; + unsigned char *method_buf = NULL; + size_t key_len; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing public key from DSA private key envelope"); + + dsa = EVP_PKEY_get1_DSA(pk); + if(dsa == NULL) { + /* Assume memory allocation error... what else could it be ? */ + goto __alloc_error; + } + + method_buf = LIBSSH2_ALLOC(session, 7); /* ssh-dss. */ + if(method_buf == NULL) { + goto __alloc_error; + } + + key = gen_publickey_from_dsa(session, dsa, &key_len); + if(key == NULL) { + goto __alloc_error; + } + DSA_free(dsa); + + memcpy(method_buf, "ssh-dss", 7); + *method = method_buf; + *method_len = 7; + *pubkeydata = key; + *pubkeydata_len = key_len; + return 0; + + __alloc_error: + if(dsa != NULL) { + DSA_free(dsa); + } + if(method_buf != NULL) { + LIBSSH2_FREE(session, method_buf); + } + + return _libssh2_error(session, + LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); +} + +static int +gen_publickey_from_dsa_openssh_priv_data(LIBSSH2_SESSION *session, + struct string_buf *decrypted, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + libssh2_dsa_ctx **dsa_ctx) +{ + int rc = 0; + size_t plen, qlen, glen, pub_len, priv_len; + unsigned char *p, *q, *g, *pub_key, *priv_key; + DSA *dsa = NULL; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing DSA keys from private key data"); + + if(_libssh2_get_bignum_bytes(decrypted, &p, &plen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "DSA no p"); + return -1; 
+ } + + if(_libssh2_get_bignum_bytes(decrypted, &q, &qlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "DSA no q"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &g, &glen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "DSA no g"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &pub_key, &pub_len)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "DSA no public key"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &priv_key, &priv_len)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "DSA no private key"); + return -1; + } + + rc = _libssh2_dsa_new(&dsa, p, plen, q, qlen, g, glen, pub_key, pub_len, + priv_key, priv_len); + if(rc != 0) { + _libssh2_debug(session, + LIBSSH2_ERROR_PROTO, + "Could not create DSA private key"); + goto fail; + } + + if(dsa != NULL && pubkeydata != NULL && method != NULL) { + EVP_PKEY *pk = EVP_PKEY_new(); + EVP_PKEY_set1_DSA(pk, dsa); + + rc = gen_publickey_from_dsa_evp(session, method, method_len, + pubkeydata, pubkeydata_len, + pk); + + if(pk) + EVP_PKEY_free(pk); + } + + if(dsa_ctx != NULL) + *dsa_ctx = dsa; + else + DSA_free(dsa); + + return rc; + +fail: + + if(dsa != NULL) + DSA_free(dsa); + + return _libssh2_error(session, + LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); +} + +static int +_libssh2_dsa_new_openssh_private(libssh2_dsa_ctx ** dsa, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase) +{ + FILE *fp; + int rc; + unsigned char *buf = NULL; + struct string_buf *decrypted = NULL; + + if(session == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + return -1; + } + + _libssh2_init_if_needed(); + + fp = fopen(filename, "r"); + if(!fp) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to open OpenSSH DSA private key file"); + return -1; + } + + rc = _libssh2_openssh_pem_parse(session, passphrase, fp, &decrypted); + fclose(fp); + if(rc) { + return rc; + } + + /* We 
have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted key data not found"); + return -1; + } + + if(strcmp("ssh-dss", (const char *)buf) == 0) { + rc = gen_publickey_from_dsa_openssh_priv_data(session, decrypted, + NULL, 0, + NULL, 0, dsa); + } + else { + rc = -1; + } + + if(decrypted) + _libssh2_string_buf_free(session, decrypted); + + return rc; +} + +int +_libssh2_dsa_new_private(libssh2_dsa_ctx ** dsa, + LIBSSH2_SESSION * session, + const char *filename, unsigned const char *passphrase) +{ + int rc; + + pem_read_bio_func read_dsa = + (pem_read_bio_func) &PEM_read_bio_DSAPrivateKey; + + _libssh2_init_if_needed(); + + rc = read_private_key_from_file((void **) dsa, read_dsa, + filename, passphrase); + + if(rc) { + rc = _libssh2_dsa_new_openssh_private(dsa, session, + filename, passphrase); + } + + return rc; +} + +#endif /* LIBSSH_DSA */ + +#if LIBSSH2_ECDSA + +int +_libssh2_ecdsa_new_private_frommemory(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filedata, size_t filedata_len, + unsigned const char *passphrase) +{ + int rc; + + pem_read_bio_func read_ec = + (pem_read_bio_func) &PEM_read_bio_ECPrivateKey; + + _libssh2_init_if_needed(); + + rc = read_private_key_from_memory((void **) ec_ctx, read_ec, + filedata, filedata_len, passphrase); + + if(rc) { + rc = read_openssh_private_key_from_memory((void **)ec_ctx, session, + "ssh-ecdsa", filedata, + filedata_len, passphrase); + } + + return rc; +} + +#endif /* LIBSSH2_ECDSA */ + + +#if LIBSSH2_ED25519 + +int +_libssh2_curve25519_new(LIBSSH2_SESSION *session, + unsigned char **out_public_key, + unsigned char **out_private_key) +{ + EVP_PKEY *key = NULL; + EVP_PKEY_CTX *pctx = NULL; + unsigned char *priv = NULL, *pub = NULL; + size_t privLen, pubLen; + int rc = -1; + + pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_X25519, NULL); + 
if(pctx == NULL) + return -1; + + if(EVP_PKEY_keygen_init(pctx) != 1 || + EVP_PKEY_keygen(pctx, &key) != 1) { + goto cleanExit; + } + + if(out_private_key != NULL) { + privLen = LIBSSH2_ED25519_KEY_LEN; + priv = LIBSSH2_ALLOC(session, privLen); + if(priv == NULL) + goto cleanExit; + + if(EVP_PKEY_get_raw_private_key(key, priv, &privLen) != 1 || + privLen != LIBSSH2_ED25519_KEY_LEN) { + goto cleanExit; + } + + *out_private_key = priv; + priv = NULL; + } + + if(out_public_key != NULL) { + pubLen = LIBSSH2_ED25519_KEY_LEN; + pub = LIBSSH2_ALLOC(session, pubLen); + if(pub == NULL) + goto cleanExit; + + if(EVP_PKEY_get_raw_public_key(key, pub, &pubLen) != 1 || + pubLen != LIBSSH2_ED25519_KEY_LEN) { + goto cleanExit; + } + + *out_public_key = pub; + pub = NULL; + } + + /* success */ + rc = 0; + +cleanExit: + + if(pctx) + EVP_PKEY_CTX_free(pctx); + if(key) + EVP_PKEY_free(key); + if(priv) + LIBSSH2_FREE(session, priv); + if(pub) + LIBSSH2_FREE(session, pub); + + return rc; +} + + +static int +gen_publickey_from_ed_evp(LIBSSH2_SESSION *session, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + EVP_PKEY *pk) +{ + const char methodName[] = "ssh-ed25519"; + unsigned char *methodBuf = NULL; + size_t rawKeyLen = 0; + unsigned char *keyBuf = NULL; + size_t bufLen = 0; + unsigned char *bufPos = NULL; + + _libssh2_debug(session, LIBSSH2_TRACE_AUTH, + "Computing public key from ED private key envelope"); + + methodBuf = LIBSSH2_ALLOC(session, sizeof(methodName) - 1); + if(!methodBuf) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); + goto fail; + } + memcpy(methodBuf, methodName, sizeof(methodName) - 1); + + if(EVP_PKEY_get_raw_public_key(pk, NULL, &rawKeyLen) != 1) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "EVP_PKEY_get_raw_public_key failed"); + goto fail; + } + + /* Key form is: type_len(4) + type(11) + pub_key_len(4) + pub_key(32). 
*/ + bufLen = 4 + sizeof(methodName) - 1 + 4 + rawKeyLen; + bufPos = keyBuf = LIBSSH2_ALLOC(session, bufLen); + if(!keyBuf) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key data"); + goto fail; + } + + _libssh2_store_str(&bufPos, methodName, sizeof(methodName) - 1); + _libssh2_store_u32(&bufPos, rawKeyLen); + + if(EVP_PKEY_get_raw_public_key(pk, bufPos, &rawKeyLen) != 1) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "EVP_PKEY_get_raw_public_key failed"); + goto fail; + } + + *method = methodBuf; + *method_len = sizeof(methodName) - 1; + *pubkeydata = keyBuf; + *pubkeydata_len = bufLen; + return 0; + +fail: + if(methodBuf) + LIBSSH2_FREE(session, methodBuf); + if(keyBuf) + LIBSSH2_FREE(session, keyBuf); + return -1; +} + + +static int +gen_publickey_from_ed25519_openssh_priv_data(LIBSSH2_SESSION *session, + struct string_buf *decrypted, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + libssh2_ed25519_ctx **out_ctx) +{ + libssh2_ed25519_ctx *ctx = NULL; + unsigned char *method_buf = NULL; + unsigned char *key = NULL; + int i, ret = 0; + unsigned char *pub_key, *priv_key, *buf; + size_t key_len = 0, tmp_len = 0; + unsigned char *p; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing ED25519 keys from private key data"); + + if(_libssh2_get_string(decrypted, &pub_key, &tmp_len) || + tmp_len != LIBSSH2_ED25519_KEY_LEN) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Wrong public key length"); + return -1; + } + + if(_libssh2_get_string(decrypted, &priv_key, &tmp_len) || + tmp_len != LIBSSH2_ED25519_PRIVATE_KEY_LEN) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Wrong private key length"); + ret = -1; + goto clean_exit; + } + + /* first 32 bytes of priv_key is the private key, the last 32 bytes are + the public key */ + ctx = EVP_PKEY_new_raw_private_key(EVP_PKEY_ED25519, NULL, + (const unsigned char *)priv_key, + LIBSSH2_ED25519_KEY_LEN); + + /* 
comment */ + if(_libssh2_get_string(decrypted, &buf, &tmp_len)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unable to read comment"); + ret = -1; + goto clean_exit; + } + + if(tmp_len > 0) { + unsigned char *comment = LIBSSH2_CALLOC(session, tmp_len + 1); + if(comment != NULL) { + memcpy(comment, buf, tmp_len); + memcpy(comment + tmp_len, "\0", 1); + + _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Key comment: %s", + comment); + + LIBSSH2_FREE(session, comment); + } + } + + /* Padding */ + i = 1; + while(decrypted->dataptr < decrypted->data + decrypted->len) { + if(*decrypted->dataptr != i) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Wrong padding"); + ret = -1; + goto clean_exit; + } + i++; + decrypted->dataptr++; + } + + if(ret == 0) { + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing public key from ED25519 " + "private key envelope"); + + method_buf = LIBSSH2_ALLOC(session, 11); /* ssh-ed25519. */ + if(method_buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for ED25519 key"); + goto clean_exit; + } + + /* Key form is: type_len(4) + type(11) + pub_key_len(4) + + pub_key(32). 
*/ + key_len = LIBSSH2_ED25519_KEY_LEN + 19; + key = LIBSSH2_CALLOC(session, key_len); + if(key == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for ED25519 key"); + goto clean_exit; + } + + p = key; + + _libssh2_store_str(&p, "ssh-ed25519", 11); + _libssh2_store_str(&p, (const char *)pub_key, LIBSSH2_ED25519_KEY_LEN); + + memcpy(method_buf, "ssh-ed25519", 11); + + if(method != NULL) + *method = method_buf; + else + LIBSSH2_FREE(session, method_buf); + + if(method_len != NULL) + *method_len = 11; + + if(pubkeydata != NULL) + *pubkeydata = key; + else + LIBSSH2_FREE(session, key); + + if(pubkeydata_len != NULL) + *pubkeydata_len = key_len; + + if(out_ctx != NULL) + *out_ctx = ctx; + else if(ctx != NULL) + _libssh2_ed25519_free(ctx); + + return 0; + } + +clean_exit: + + if(ctx) + _libssh2_ed25519_free(ctx); + + if(method_buf) + LIBSSH2_FREE(session, method_buf); + + if(key) + LIBSSH2_FREE(session, key); + + return -1; +} + +int +_libssh2_ed25519_new_private(libssh2_ed25519_ctx ** ed_ctx, + LIBSSH2_SESSION * session, + const char *filename, const uint8_t *passphrase) +{ + int rc; + FILE *fp; + unsigned char *buf; + struct string_buf *decrypted = NULL; + libssh2_ed25519_ctx *ctx = NULL; + + if(session == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + return -1; + } + + _libssh2_init_if_needed(); + + fp = fopen(filename, "r"); + if(!fp) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to open ED25519 private key file"); + return -1; + } + + rc = _libssh2_openssh_pem_parse(session, passphrase, fp, &decrypted); + fclose(fp); + if(rc) { + return rc; + } + + /* We have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted key data not found"); + return -1; + } + + if(strcmp("ssh-ed25519", (const char *)buf) == 0) { + rc = 
gen_publickey_from_ed25519_openssh_priv_data(session, + decrypted, + NULL, + NULL, + NULL, + NULL, + &ctx); + } + else { + rc = -1; + } + + if(decrypted) + _libssh2_string_buf_free(session, decrypted); + + if(rc == 0) { + if(ed_ctx != NULL) + *ed_ctx = ctx; + else if(ctx != NULL) + _libssh2_ed25519_free(ctx); + } + + return rc; +} + +int +_libssh2_ed25519_new_private_frommemory(libssh2_ed25519_ctx ** ed_ctx, + LIBSSH2_SESSION * session, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase) +{ + libssh2_ed25519_ctx *ctx = NULL; + + _libssh2_init_if_needed(); + + if(read_private_key_from_memory((void **)&ctx, + (pem_read_bio_func) + &PEM_read_bio_PrivateKey, + filedata, filedata_len, passphrase) == 0) { + if(EVP_PKEY_id(ctx) != EVP_PKEY_ED25519) { + _libssh2_ed25519_free(ctx); + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Private key is not an ED25519 key"); + } + + *ed_ctx = ctx; + return 0; + } + + return read_openssh_private_key_from_memory((void **)ed_ctx, session, + "ssh-ed25519", + filedata, filedata_len, + passphrase); +} + +int +_libssh2_ed25519_new_public(libssh2_ed25519_ctx ** ed_ctx, + LIBSSH2_SESSION * session, + const unsigned char *raw_pub_key, + const uint8_t key_len) +{ + libssh2_ed25519_ctx *ctx = NULL; + + if(ed_ctx == NULL) + return -1; + + ctx = EVP_PKEY_new_raw_public_key(EVP_PKEY_ED25519, NULL, + raw_pub_key, key_len); + if(!ctx) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "could not create ED25519 public key"); + + if(ed_ctx != NULL) + *ed_ctx = ctx; + else if(ctx) + _libssh2_ed25519_free(ctx); + + return 0; +} +#endif /* LIBSSH2_ED25519 */ + + +int +_libssh2_rsa_sha2_sign(LIBSSH2_SESSION * session, + libssh2_rsa_ctx * rsactx, + const unsigned char *hash, + size_t hash_len, + unsigned char **signature, size_t *signature_len) +{ + int ret; + unsigned char *sig; + unsigned int sig_len; + + sig_len = RSA_size(rsactx); + sig = LIBSSH2_ALLOC(session, sig_len); + + if(!sig) { + return -1; + } + + 
if(hash_len == SHA_DIGEST_LENGTH) + ret = RSA_sign(NID_sha1, hash, hash_len, sig, &sig_len, rsactx); + else if(hash_len == SHA256_DIGEST_LENGTH) + ret = RSA_sign(NID_sha256, hash, hash_len, sig, &sig_len, rsactx); + else if(hash_len == SHA512_DIGEST_LENGTH) + ret = RSA_sign(NID_sha512, hash, hash_len, sig, &sig_len, rsactx); + else { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unsupported hash digest length"); + ret = -1; + } + + if(!ret) { + LIBSSH2_FREE(session, sig); + return -1; + } + + *signature = sig; + *signature_len = sig_len; + + return 0; +} + + +int +_libssh2_rsa_sha1_sign(LIBSSH2_SESSION * session, + libssh2_rsa_ctx * rsactx, + const unsigned char *hash, + size_t hash_len, + unsigned char **signature, size_t *signature_len) +{ + return _libssh2_rsa_sha2_sign(session, rsactx, hash, hash_len, + signature, signature_len); +} + + +#if LIBSSH2_DSA +int +_libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, + const unsigned char *hash, + unsigned long hash_len, unsigned char *signature) +{ + DSA_SIG *sig; + const BIGNUM * r; + const BIGNUM * s; + int r_len, s_len; + (void) hash_len; + + sig = DSA_do_sign(hash, SHA_DIGEST_LENGTH, dsactx); + if(!sig) { return -1; } @@ -661,26 +1989,89 @@ _libssh2_dsa_sha1_sign(libssh2_dsa_ctx * dsactx, s = sig->s; #endif r_len = BN_num_bytes(r); - if (r_len < 1 || r_len > 20) { + if(r_len < 1 || r_len > 20) { DSA_SIG_free(sig); return -1; } s_len = BN_num_bytes(s); - if (s_len < 1 || s_len > 20) { + if(s_len < 1 || s_len > 20) { DSA_SIG_free(sig); return -1; } - memset(signature, 0, 40); + memset(signature, 0, 40); + + BN_bn2bin(r, signature + (20 - r_len)); + BN_bn2bin(s, signature + 20 + (20 - s_len)); + + DSA_SIG_free(sig); + + return 0; +} +#endif /* LIBSSH_DSA */ + +#if LIBSSH2_ECDSA + +int +_libssh2_ecdsa_sign(LIBSSH2_SESSION * session, libssh2_ecdsa_ctx * ec_ctx, + const unsigned char *hash, unsigned long hash_len, + unsigned char **signature, size_t *signature_len) +{ + int r_len, s_len; + int rc = 0; + size_t 
out_buffer_len = 0; + unsigned char *sp; + const BIGNUM *pr = NULL, *ps = NULL; + unsigned char *temp_buffer = NULL; + unsigned char *out_buffer = NULL; + + ECDSA_SIG *sig = ECDSA_do_sign(hash, hash_len, ec_ctx); + if(sig == NULL) + return -1; +#ifdef HAVE_OPAQUE_STRUCTS + ECDSA_SIG_get0(sig, &pr, &ps); +#else + pr = sig->r; + ps = sig->s; +#endif + + r_len = BN_num_bytes(pr) + 1; + s_len = BN_num_bytes(ps) + 1; + + temp_buffer = malloc(r_len + s_len + 8); + if(temp_buffer == NULL) { + rc = -1; + goto clean_exit; + } + + sp = temp_buffer; + sp = write_bn(sp, pr, r_len); + sp = write_bn(sp, ps, s_len); - BN_bn2bin(r, signature + (20 - r_len)); - BN_bn2bin(s, signature + 20 + (20 - s_len)); + out_buffer_len = (size_t)(sp - temp_buffer); - DSA_SIG_free(sig); + out_buffer = LIBSSH2_CALLOC(session, out_buffer_len); + if(out_buffer == NULL) { + rc = -1; + goto clean_exit; + } - return 0; + memcpy(out_buffer, temp_buffer, out_buffer_len); + + *signature = out_buffer; + *signature_len = out_buffer_len; + +clean_exit: + + if(temp_buffer != NULL) + free(temp_buffer); + + if(sig) + ECDSA_SIG_free(sig); + + return rc; } -#endif /* LIBSSH_DSA */ +#endif /* LIBSSH2_ECDSA */ int _libssh2_sha1_init(libssh2_sha1_ctx *ctx) @@ -688,10 +2079,10 @@ _libssh2_sha1_init(libssh2_sha1_ctx *ctx) #ifdef HAVE_OPAQUE_STRUCTS *ctx = EVP_MD_CTX_new(); - if (*ctx == NULL) + if(*ctx == NULL) return 0; - if (EVP_DigestInit(*ctx, EVP_get_digestbyname("sha1"))) + if(EVP_DigestInit(*ctx, EVP_get_digestbyname("sha1"))) return 1; EVP_MD_CTX_free(*ctx); @@ -711,10 +2102,10 @@ _libssh2_sha1(const unsigned char *message, unsigned long len, #ifdef HAVE_OPAQUE_STRUCTS EVP_MD_CTX * ctx = EVP_MD_CTX_new(); - if (ctx == NULL) + if(ctx == NULL) return 1; /* error */ - if (EVP_DigestInit(ctx, EVP_get_digestbyname("sha1"))) { + if(EVP_DigestInit(ctx, EVP_get_digestbyname("sha1"))) { EVP_DigestUpdate(ctx, message, len); EVP_DigestFinal(ctx, out, NULL); EVP_MD_CTX_free(ctx); @@ -725,7 +2116,7 @@ _libssh2_sha1(const 
unsigned char *message, unsigned long len, EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); - if (EVP_DigestInit(&ctx, EVP_get_digestbyname("sha1"))) { + if(EVP_DigestInit(&ctx, EVP_get_digestbyname("sha1"))) { EVP_DigestUpdate(&ctx, message, len); EVP_DigestFinal(&ctx, out, NULL); return 0; /* success */ @@ -740,10 +2131,10 @@ _libssh2_sha256_init(libssh2_sha256_ctx *ctx) #ifdef HAVE_OPAQUE_STRUCTS *ctx = EVP_MD_CTX_new(); - if (*ctx == NULL) + if(*ctx == NULL) return 0; - if (EVP_DigestInit(*ctx, EVP_get_digestbyname("sha256"))) + if(EVP_DigestInit(*ctx, EVP_get_digestbyname("sha256"))) return 1; EVP_MD_CTX_free(*ctx); @@ -763,7 +2154,7 @@ _libssh2_sha256(const unsigned char *message, unsigned long len, #ifdef HAVE_OPAQUE_STRUCTS EVP_MD_CTX * ctx = EVP_MD_CTX_new(); - if (ctx == NULL) + if(ctx == NULL) return 1; /* error */ if(EVP_DigestInit(ctx, EVP_get_digestbyname("sha256"))) { @@ -787,15 +2178,15 @@ _libssh2_sha256(const unsigned char *message, unsigned long len, } int -_libssh2_md5_init(libssh2_md5_ctx *ctx) +_libssh2_sha384_init(libssh2_sha384_ctx *ctx) { #ifdef HAVE_OPAQUE_STRUCTS *ctx = EVP_MD_CTX_new(); - if (*ctx == NULL) + if(*ctx == NULL) return 0; - if (EVP_DigestInit(*ctx, EVP_get_digestbyname("md5"))) + if(EVP_DigestInit(*ctx, EVP_get_digestbyname("sha384"))) return 1; EVP_MD_CTX_free(*ctx); @@ -804,237 +2195,814 @@ _libssh2_md5_init(libssh2_md5_ctx *ctx) return 0; #else EVP_MD_CTX_init(ctx); - return EVP_DigestInit(ctx, EVP_get_digestbyname("md5")); + return EVP_DigestInit(ctx, EVP_get_digestbyname("sha384")); #endif } -static unsigned char * -write_bn(unsigned char *buf, const BIGNUM *bn, int bn_bytes) +int +_libssh2_sha384(const unsigned char *message, unsigned long len, + unsigned char *out) { - unsigned char *p = buf; +#ifdef HAVE_OPAQUE_STRUCTS + EVP_MD_CTX * ctx = EVP_MD_CTX_new(); - /* Left space for bn size which will be written below. 
*/ - p += 4; + if(ctx == NULL) + return 1; /* error */ - *p = 0; - BN_bn2bin(bn, p + 1); - if (!(*(p + 1) & 0x80)) { - memmove(p, p + 1, --bn_bytes); + if(EVP_DigestInit(ctx, EVP_get_digestbyname("sha384"))) { + EVP_DigestUpdate(ctx, message, len); + EVP_DigestFinal(ctx, out, NULL); + EVP_MD_CTX_free(ctx); + return 0; /* success */ } - _libssh2_htonu32(p - 4, bn_bytes); /* Post write bn size. */ + EVP_MD_CTX_free(ctx); +#else + EVP_MD_CTX ctx; - return p + bn_bytes; + EVP_MD_CTX_init(&ctx); + if(EVP_DigestInit(&ctx, EVP_get_digestbyname("sha384"))) { + EVP_DigestUpdate(&ctx, message, len); + EVP_DigestFinal(&ctx, out, NULL); + return 0; /* success */ + } +#endif + return 1; /* error */ } -static unsigned char * -gen_publickey_from_rsa(LIBSSH2_SESSION *session, RSA *rsa, - size_t *key_len) +int +_libssh2_sha512_init(libssh2_sha512_ctx *ctx) { - int e_bytes, n_bytes; - unsigned long len; - unsigned char* key; - unsigned char* p; - const BIGNUM * e; - const BIGNUM * n; #ifdef HAVE_OPAQUE_STRUCTS - RSA_get0_key(rsa, &n, &e, NULL); + *ctx = EVP_MD_CTX_new(); + + if(*ctx == NULL) + return 0; + + if(EVP_DigestInit(*ctx, EVP_get_digestbyname("sha512"))) + return 1; + + EVP_MD_CTX_free(*ctx); + *ctx = NULL; + + return 0; #else - e = rsa->e; - n = rsa->n; + EVP_MD_CTX_init(ctx); + return EVP_DigestInit(ctx, EVP_get_digestbyname("sha512")); #endif - e_bytes = BN_num_bytes(e) + 1; - n_bytes = BN_num_bytes(n) + 1; +} + +int +_libssh2_sha512(const unsigned char *message, unsigned long len, + unsigned char *out) +{ +#ifdef HAVE_OPAQUE_STRUCTS + EVP_MD_CTX * ctx = EVP_MD_CTX_new(); + + if(ctx == NULL) + return 1; /* error */ + + if(EVP_DigestInit(ctx, EVP_get_digestbyname("sha512"))) { + EVP_DigestUpdate(ctx, message, len); + EVP_DigestFinal(ctx, out, NULL); + EVP_MD_CTX_free(ctx); + return 0; /* success */ + } + EVP_MD_CTX_free(ctx); +#else + EVP_MD_CTX ctx; + + EVP_MD_CTX_init(&ctx); + if(EVP_DigestInit(&ctx, EVP_get_digestbyname("sha512"))) { + EVP_DigestUpdate(&ctx, message, 
len); + EVP_DigestFinal(&ctx, out, NULL); + return 0; /* success */ + } +#endif + return 1; /* error */ +} + +int +_libssh2_md5_init(libssh2_md5_ctx *ctx) +{ + /* MD5 digest is not supported in OpenSSL FIPS mode + * Trying to init it will result in a latent OpenSSL error: + * "digital envelope routines:FIPS_DIGESTINIT:disabled for fips" + * So, just return 0 in FIPS mode + */ +#if OPENSSL_VERSION_NUMBER >= 0x000907000L && \ + defined(OPENSSL_VERSION_MAJOR) && \ + OPENSSL_VERSION_MAJOR < 3 && \ + !defined(LIBRESSL_VERSION_NUMBER) + if(FIPS_mode() != 0) + return 0; +#endif + +#ifdef HAVE_OPAQUE_STRUCTS + *ctx = EVP_MD_CTX_new(); + + if(*ctx == NULL) + return 0; + + if(EVP_DigestInit(*ctx, EVP_get_digestbyname("md5"))) + return 1; + + EVP_MD_CTX_free(*ctx); + *ctx = NULL; + + return 0; +#else + EVP_MD_CTX_init(ctx); + return EVP_DigestInit(ctx, EVP_get_digestbyname("md5")); +#endif +} + +#if LIBSSH2_ECDSA + +static int +gen_publickey_from_ec_evp(LIBSSH2_SESSION *session, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + EVP_PKEY *pk) +{ + int rc = 0; + EC_KEY *ec = NULL; + unsigned char *p; + unsigned char *method_buf = NULL; + unsigned char *key; + size_t key_len = 0; + unsigned char *octal_value = NULL; + size_t octal_len; + const EC_POINT *public_key; + const EC_GROUP *group; + BN_CTX *bn_ctx; + libssh2_curve_type type; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing public key from EC private key envelope"); + + bn_ctx = BN_CTX_new(); + if(bn_ctx == NULL) + return -1; + + ec = EVP_PKEY_get1_EC_KEY(pk); + if(ec == NULL) { + rc = -1; + goto clean_exit; + } + + public_key = EC_KEY_get0_public_key(ec); + group = EC_KEY_get0_group(ec); + type = _libssh2_ecdsa_get_curve_type(ec); + + method_buf = LIBSSH2_ALLOC(session, 19); + if(method_buf == NULL) { + return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "out of memory"); + } + + if(type == LIBSSH2_EC_CURVE_NISTP256) + memcpy(method_buf, 
"ecdsa-sha2-nistp256", 19); + else if(type == LIBSSH2_EC_CURVE_NISTP384) + memcpy(method_buf, "ecdsa-sha2-nistp384", 19); + else if(type == LIBSSH2_EC_CURVE_NISTP521) + memcpy(method_buf, "ecdsa-sha2-nistp521", 19); + else { + _libssh2_debug(session, + LIBSSH2_TRACE_ERROR, + "Unsupported EC private key type"); + rc = -1; + goto clean_exit; + } + + /* get length */ + octal_len = EC_POINT_point2oct(group, public_key, + POINT_CONVERSION_UNCOMPRESSED, + NULL, 0, bn_ctx); + if(octal_len > EC_MAX_POINT_LEN) { + rc = -1; + goto clean_exit; + } + + octal_value = malloc(octal_len); + if(octal_value == NULL) { + rc = -1; + goto clean_exit; + } + + /* convert to octal */ + if(EC_POINT_point2oct(group, public_key, POINT_CONVERSION_UNCOMPRESSED, + octal_value, octal_len, bn_ctx) != octal_len) { + rc = -1; + goto clean_exit; + } + + /* Key form is: type_len(4) + type(19) + domain_len(4) + domain(8) + + pub_key_len(4) + pub_key(~65). */ + key_len = 4 + 19 + 4 + 8 + 4 + octal_len; + key = LIBSSH2_ALLOC(session, key_len); + if(key == NULL) { + rc = -1; + goto clean_exit; + } + + /* Process key encoding. 
*/ + p = key; + + /* Key type */ + _libssh2_store_str(&p, (const char *)method_buf, 19); + + /* Name domain */ + _libssh2_store_str(&p, (const char *)method_buf + 11, 8); + + /* Public key */ + _libssh2_store_str(&p, (const char *)octal_value, octal_len); + + *method = method_buf; + *method_len = 19; + *pubkeydata = key; + *pubkeydata_len = key_len; + +clean_exit: + + if(ec != NULL) + EC_KEY_free(ec); + + if(bn_ctx != NULL) { + BN_CTX_free(bn_ctx); + } + + if(octal_value != NULL) + free(octal_value); + + if(rc == 0) + return 0; + + if(method_buf != NULL) + LIBSSH2_FREE(session, method_buf); + + return -1; +} + +static int +gen_publickey_from_ecdsa_openssh_priv_data(LIBSSH2_SESSION *session, + libssh2_curve_type curve_type, + struct string_buf *decrypted, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + libssh2_ecdsa_ctx **ec_ctx) +{ + int rc = 0; + size_t curvelen, exponentlen, pointlen; + unsigned char *curve, *exponent, *point_buf; + EC_KEY *ec_key = NULL; + BIGNUM *bn_exponent; + + _libssh2_debug(session, + LIBSSH2_TRACE_AUTH, + "Computing ECDSA keys from private key data"); + + if(_libssh2_get_string(decrypted, &curve, &curvelen) || + curvelen == 0) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "ECDSA no curve"); + return -1; + } + + if(_libssh2_get_string(decrypted, &point_buf, &pointlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "ECDSA no point"); + return -1; + } + + if(_libssh2_get_bignum_bytes(decrypted, &exponent, &exponentlen)) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "ECDSA no exponent"); + return -1; + } + + if((rc = _libssh2_ecdsa_curve_name_with_octal_new(&ec_key, point_buf, + pointlen, curve_type)) != 0) { + rc = -1; + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "ECDSA could not create key"); + goto fail; + } + + bn_exponent = BN_new(); + if(bn_exponent == NULL) { + rc = -1; + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for private key 
data"); + goto fail; + } + + BN_bin2bn(exponent, exponentlen, bn_exponent); + rc = (EC_KEY_set_private_key(ec_key, bn_exponent) != 1); + + if(rc == 0 && ec_key != NULL && pubkeydata != NULL && method != NULL) { + EVP_PKEY *pk = EVP_PKEY_new(); + EVP_PKEY_set1_EC_KEY(pk, ec_key); + + rc = gen_publickey_from_ec_evp(session, method, method_len, + pubkeydata, pubkeydata_len, + pk); + + if(pk) + EVP_PKEY_free(pk); + } + + if(ec_ctx != NULL) + *ec_ctx = ec_key; + else + EC_KEY_free(ec_key); + + return rc; + +fail: + if(ec_key != NULL) + EC_KEY_free(ec_key); + + return rc; +} + +static int +_libssh2_ecdsa_new_openssh_private(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filename, + unsigned const char *passphrase) +{ + FILE *fp; + int rc; + unsigned char *buf = NULL; + libssh2_curve_type type; + struct string_buf *decrypted = NULL; + + if(session == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + return -1; + } + + _libssh2_init_if_needed(); + + fp = fopen(filename, "r"); + if(!fp) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to open OpenSSH ECDSA private key file"); + return -1; + } + + rc = _libssh2_openssh_pem_parse(session, passphrase, fp, &decrypted); + fclose(fp); + if(rc) { + return rc; + } + + /* We have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted key data not found"); + return -1; + } + + rc = _libssh2_ecdsa_curve_type_from_name((const char *)buf, &type); + + if(rc == 0) { + rc = gen_publickey_from_ecdsa_openssh_priv_data(session, type, + decrypted, NULL, 0, + NULL, 0, ec_ctx); + } + else { + rc = -1; + } + + if(decrypted) + _libssh2_string_buf_free(session, decrypted); + + return rc; +} + +int +_libssh2_ecdsa_new_private(libssh2_ecdsa_ctx ** ec_ctx, + LIBSSH2_SESSION * session, + const char *filename, unsigned 
const char *passphrase) +{ + int rc; + + pem_read_bio_func read_ec = (pem_read_bio_func) &PEM_read_bio_ECPrivateKey; + + _libssh2_init_if_needed(); + + rc = read_private_key_from_file((void **) ec_ctx, read_ec, + filename, passphrase); + + if(rc) { + return _libssh2_ecdsa_new_openssh_private(ec_ctx, session, + filename, passphrase); + } + + return rc; +} + +/* + * _libssh2_ecdsa_create_key + * + * Creates a local private key based on input curve + * and returns octal value and octal length + * + */ + +int +_libssh2_ecdsa_create_key(LIBSSH2_SESSION *session, + _libssh2_ec_key **out_private_key, + unsigned char **out_public_key_octal, + size_t *out_public_key_octal_len, + libssh2_curve_type curve_type) +{ + int ret = 1; + size_t octal_len = 0; + unsigned char octal_value[EC_MAX_POINT_LEN]; + const EC_POINT *public_key = NULL; + EC_KEY *private_key = NULL; + const EC_GROUP *group = NULL; + + /* create key */ + BN_CTX *bn_ctx = BN_CTX_new(); + if(!bn_ctx) + return -1; + + private_key = EC_KEY_new_by_curve_name(curve_type); + group = EC_KEY_get0_group(private_key); + + EC_KEY_generate_key(private_key); + public_key = EC_KEY_get0_public_key(private_key); + + /* get length */ + octal_len = EC_POINT_point2oct(group, public_key, + POINT_CONVERSION_UNCOMPRESSED, + NULL, 0, bn_ctx); + if(octal_len > EC_MAX_POINT_LEN) { + ret = -1; + goto clean_exit; + } + + /* convert to octal */ + if(EC_POINT_point2oct(group, public_key, POINT_CONVERSION_UNCOMPRESSED, + octal_value, octal_len, bn_ctx) != octal_len) { + ret = -1; + goto clean_exit; + } + + if(out_private_key != NULL) + *out_private_key = private_key; + + if(out_public_key_octal) { + *out_public_key_octal = LIBSSH2_ALLOC(session, octal_len); + if(*out_public_key_octal == NULL) { + ret = -1; + goto clean_exit; + } + + memcpy(*out_public_key_octal, octal_value, octal_len); + } + + if(out_public_key_octal_len != NULL) + *out_public_key_octal_len = octal_len; + +clean_exit: + + if(bn_ctx) + BN_CTX_free(bn_ctx); + + return (ret == 
1) ? 0 : -1; +} + +/* _libssh2_ecdh_gen_k + * + * Computes the shared secret K given a local private key, + * remote public key and length + */ + +int +_libssh2_ecdh_gen_k(_libssh2_bn **k, _libssh2_ec_key *private_key, + const unsigned char *server_public_key, size_t server_public_key_len) +{ + int ret = 0; + int rc; + size_t secret_len; + unsigned char *secret = NULL; + const EC_GROUP *private_key_group; + EC_POINT *server_public_key_point; - /* Key form is "ssh-rsa" + e + n. */ - len = 4 + 7 + 4 + e_bytes + 4 + n_bytes; + BN_CTX *bn_ctx = BN_CTX_new(); - key = LIBSSH2_ALLOC(session, len); - if (key == NULL) { - return NULL; + if(!bn_ctx) + return -1; + + if(k == NULL) + return -1; + + private_key_group = EC_KEY_get0_group(private_key); + + server_public_key_point = EC_POINT_new(private_key_group); + if(server_public_key_point == NULL) + return -1; + + rc = EC_POINT_oct2point(private_key_group, server_public_key_point, + server_public_key, server_public_key_len, bn_ctx); + if(rc != 1) { + ret = -1; + goto clean_exit; } - /* Process key encoding. */ - p = key; + secret_len = (EC_GROUP_get_degree(private_key_group) + 7) / 8; + secret = malloc(secret_len); + if(!secret) { + ret = -1; + goto clean_exit; + } - _libssh2_htonu32(p, 7); /* Key type. 
*/ - p += 4; - memcpy(p, "ssh-rsa", 7); - p += 7; + secret_len = ECDH_compute_key(secret, secret_len, server_public_key_point, + private_key, NULL); - p = write_bn(p, e, e_bytes); - p = write_bn(p, n, n_bytes); + if(secret_len <= 0 || secret_len > EC_MAX_POINT_LEN) { + ret = -1; + goto clean_exit; + } - *key_len = (size_t)(p - key); - return key; + BN_bin2bn(secret, secret_len, *k); + +clean_exit: + + if(server_public_key_point != NULL) + EC_POINT_free(server_public_key_point); + + if(bn_ctx != NULL) + BN_CTX_free(bn_ctx); + + if(secret != NULL) + free(secret); + + return ret; } -#if LIBSSH2_DSA -static unsigned char * -gen_publickey_from_dsa(LIBSSH2_SESSION* session, DSA *dsa, - size_t *key_len) + +#endif /* LIBSSH2_ECDSA */ + +#if LIBSSH2_ED25519 + +int +_libssh2_ed25519_sign(libssh2_ed25519_ctx *ctx, LIBSSH2_SESSION *session, + uint8_t **out_sig, size_t *out_sig_len, + const uint8_t *message, size_t message_len) { - int p_bytes, q_bytes, g_bytes, k_bytes; - unsigned long len; - unsigned char* key; - unsigned char* p; + int rc = -1; + EVP_MD_CTX *md_ctx = EVP_MD_CTX_new(); + size_t sig_len = 0; + unsigned char *sig = NULL; - const BIGNUM * p_bn; - const BIGNUM * q; - const BIGNUM * g; - const BIGNUM * pub_key; -#ifdef HAVE_OPAQUE_STRUCTS - DSA_get0_pqg(dsa, &p_bn, &q, &g); -#else - p_bn = dsa->p; - q = dsa->q; - g = dsa->g; -#endif + if(md_ctx != NULL) { + if(EVP_DigestSignInit(md_ctx, NULL, NULL, NULL, ctx) != 1) + goto clean_exit; + if(EVP_DigestSign(md_ctx, NULL, &sig_len, message, message_len) != 1) + goto clean_exit; -#ifdef HAVE_OPAQUE_STRUCTS - DSA_get0_key(dsa, &pub_key, NULL); -#else - pub_key = dsa->pub_key; -#endif - p_bytes = BN_num_bytes(p_bn) + 1; - q_bytes = BN_num_bytes(q) + 1; - g_bytes = BN_num_bytes(g) + 1; - k_bytes = BN_num_bytes(pub_key) + 1; + if(sig_len != LIBSSH2_ED25519_SIG_LEN) + goto clean_exit; - /* Key form is "ssh-dss" + p + q + g + pub_key. 
*/ - len = 4 + 7 + 4 + p_bytes + 4 + q_bytes + 4 + g_bytes + 4 + k_bytes; + sig = LIBSSH2_CALLOC(session, sig_len); + if(sig == NULL) + goto clean_exit; - key = LIBSSH2_ALLOC(session, len); - if (key == NULL) { - return NULL; + rc = EVP_DigestSign(md_ctx, sig, &sig_len, message, message_len); } - /* Process key encoding. */ - p = key; + if(rc == 1) { + *out_sig = sig; + *out_sig_len = sig_len; + } + else { + *out_sig_len = 0; + *out_sig = NULL; + LIBSSH2_FREE(session, sig); + } - _libssh2_htonu32(p, 7); /* Key type. */ - p += 4; - memcpy(p, "ssh-dss", 7); - p += 7; +clean_exit: - p = write_bn(p, p_bn, p_bytes); - p = write_bn(p, q, q_bytes); - p = write_bn(p, g, g_bytes); - p = write_bn(p, pub_key, k_bytes); + if(md_ctx) + EVP_MD_CTX_free(md_ctx); - *key_len = (size_t)(p - key); - return key; + return (rc == 1 ? 0 : -1); } -#endif /* LIBSSH_DSA */ -static int -gen_publickey_from_rsa_evp(LIBSSH2_SESSION *session, - unsigned char **method, - size_t *method_len, - unsigned char **pubkeydata, - size_t *pubkeydata_len, - EVP_PKEY *pk) +int +_libssh2_curve25519_gen_k(_libssh2_bn **k, + uint8_t private_key[LIBSSH2_ED25519_KEY_LEN], + uint8_t server_public_key[LIBSSH2_ED25519_KEY_LEN]) { - RSA* rsa = NULL; - unsigned char* key; - unsigned char* method_buf = NULL; - size_t key_len; + int rc = -1; + unsigned char out_shared_key[LIBSSH2_ED25519_KEY_LEN]; + EVP_PKEY *peer_key = NULL, *server_key = NULL; + EVP_PKEY_CTX *server_key_ctx = NULL; + BN_CTX *bn_ctx = NULL; + size_t out_len = 0; + + if(k == NULL || *k == NULL) + return -1; - _libssh2_debug(session, - LIBSSH2_TRACE_AUTH, - "Computing public key from RSA private key envelop"); + bn_ctx = BN_CTX_new(); + if(bn_ctx == NULL) + return -1; - rsa = EVP_PKEY_get1_RSA(pk); - if (rsa == NULL) { - /* Assume memory allocation error... what else could it be ? 
*/ - goto __alloc_error; + peer_key = EVP_PKEY_new_raw_public_key(EVP_PKEY_X25519, NULL, + server_public_key, + LIBSSH2_ED25519_KEY_LEN); + + server_key = EVP_PKEY_new_raw_private_key(EVP_PKEY_X25519, NULL, + private_key, + LIBSSH2_ED25519_KEY_LEN); + + if(peer_key == NULL || server_key == NULL) { + goto cleanExit; } - method_buf = LIBSSH2_ALLOC(session, 7); /* ssh-rsa. */ - if (method_buf == NULL) { - goto __alloc_error; + server_key_ctx = EVP_PKEY_CTX_new(server_key, NULL); + if(server_key_ctx == NULL) { + goto cleanExit; } - key = gen_publickey_from_rsa(session, rsa, &key_len); - if (key == NULL) { - goto __alloc_error; + rc = EVP_PKEY_derive_init(server_key_ctx); + if(rc <= 0) goto cleanExit; + + rc = EVP_PKEY_derive_set_peer(server_key_ctx, peer_key); + if(rc <= 0) goto cleanExit; + + rc = EVP_PKEY_derive(server_key_ctx, NULL, &out_len); + if(rc <= 0) goto cleanExit; + + if(out_len != LIBSSH2_ED25519_KEY_LEN) { + rc = -1; + goto cleanExit; } - RSA_free(rsa); - memcpy(method_buf, "ssh-rsa", 7); - *method = method_buf; - *method_len = 7; - *pubkeydata = key; - *pubkeydata_len = key_len; - return 0; + rc = EVP_PKEY_derive(server_key_ctx, out_shared_key, &out_len); - __alloc_error: - if (rsa != NULL) { - RSA_free(rsa); + if(rc == 1 && out_len == LIBSSH2_ED25519_KEY_LEN) { + BN_bin2bn(out_shared_key, LIBSSH2_ED25519_KEY_LEN, *k); } - if (method_buf != NULL) { - LIBSSH2_FREE(session, method_buf); + else { + rc = -1; } - return _libssh2_error(session, - LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for private key data"); +cleanExit: + + if(server_key_ctx) + EVP_PKEY_CTX_free(server_key_ctx); + if(peer_key) + EVP_PKEY_free(peer_key); + if(server_key) + EVP_PKEY_free(server_key); + if(bn_ctx != NULL) + BN_CTX_free(bn_ctx); + + return (rc == 1) ? 
0 : -1; } -#if LIBSSH2_DSA + +int +_libssh2_ed25519_verify(libssh2_ed25519_ctx *ctx, const uint8_t *s, + size_t s_len, const uint8_t *m, size_t m_len) +{ + int ret = -1; + + EVP_MD_CTX *md_ctx = EVP_MD_CTX_new(); + if(NULL == md_ctx) + return -1; + + ret = EVP_DigestVerifyInit(md_ctx, NULL, NULL, NULL, ctx); + if(ret != 1) + goto clean_exit; + + ret = EVP_DigestVerify(md_ctx, s, s_len, m, m_len); + + clean_exit: + + EVP_MD_CTX_free(md_ctx); + + return (ret == 1) ? 0 : -1; +} + +#endif /* LIBSSH2_ED25519 */ + static int -gen_publickey_from_dsa_evp(LIBSSH2_SESSION *session, - unsigned char **method, - size_t *method_len, - unsigned char **pubkeydata, - size_t *pubkeydata_len, - EVP_PKEY *pk) +_libssh2_pub_priv_openssh_keyfile(LIBSSH2_SESSION *session, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + const char *privatekey, + const char *passphrase) { - DSA* dsa = NULL; - unsigned char* key; - unsigned char* method_buf = NULL; - size_t key_len; + FILE *fp; + unsigned char *buf = NULL; + struct string_buf *decrypted = NULL; + int rc = 0; + + if(session == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + return -1; + } - _libssh2_debug(session, - LIBSSH2_TRACE_AUTH, - "Computing public key from DSA private key envelop"); + _libssh2_init_if_needed(); - dsa = EVP_PKEY_get1_DSA(pk); - if (dsa == NULL) { - /* Assume memory allocation error... what else could it be ? */ - goto __alloc_error; + fp = fopen(privatekey, "r"); + if(!fp) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to open private key file"); + return -1; } - method_buf = LIBSSH2_ALLOC(session, 7); /* ssh-dss. 
*/ - if (method_buf == NULL) { - goto __alloc_error; + rc = _libssh2_openssh_pem_parse(session, (const unsigned char *)passphrase, + fp, &decrypted); + fclose(fp); + if(rc) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Not an OpenSSH key file"); + return rc; } - key = gen_publickey_from_dsa(session, dsa, &key_len); - if (key == NULL) { - goto __alloc_error; + /* We have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted key data not found"); + return -1; } - DSA_free(dsa); - memcpy(method_buf, "ssh-dss", 7); - *method = method_buf; - *method_len = 7; - *pubkeydata = key; - *pubkeydata_len = key_len; - return 0; + rc = -1; - __alloc_error: - if (dsa != NULL) { - DSA_free(dsa); +#if LIBSSH2_ED25519 + if(strcmp("ssh-ed25519", (const char *)buf) == 0) { + rc = gen_publickey_from_ed25519_openssh_priv_data(session, decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + NULL); } - if (method_buf != NULL) { - LIBSSH2_FREE(session, method_buf); +#endif +#if LIBSSH2_RSA + if(strcmp("ssh-rsa", (const char *)buf) == 0) { + rc = gen_publickey_from_rsa_openssh_priv_data(session, decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + NULL); + } +#endif +#if LIBSSH2_DSA + if(strcmp("ssh-dss", (const char *)buf) == 0) { + rc = gen_publickey_from_dsa_openssh_priv_data(session, decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + NULL); + } +#endif +#if LIBSSH2_ECDSA + { + libssh2_curve_type type; + + if(_libssh2_ecdsa_curve_type_from_name((const char *)buf, + &type) == 0) { + rc = gen_publickey_from_ecdsa_openssh_priv_data(session, type, + decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + NULL); + } } +#endif - return _libssh2_error(session, - LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for private key data"); + if(decrypted) + 
_libssh2_string_buf_free(session, decrypted); + + if(rc != 0) { + _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unsupported OpenSSH key type"); + } + + return rc; } -#endif /* LIBSSH_DSA */ int _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, @@ -1049,6 +3017,7 @@ _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, BIO* bp; EVP_PKEY* pk; int pktype; + int rc; _libssh2_debug(session, LIBSSH2_TRACE_AUTH, @@ -1056,31 +3025,35 @@ _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, privatekey); bp = BIO_new_file(privatekey, "r"); - if (bp == NULL) { + if(bp == NULL) { return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Unable to extract public key from private key " "file: Unable to open private key file"); } - if (!EVP_get_cipherbyname("des")) { - /* If this cipher isn't loaded it's a pretty good indication that none - * are. I have *NO DOUBT* that there's a better way to deal with this - * ($#&%#$(%$#( Someone buy me an OpenSSL manual and I'll read up on - * it. - */ - OpenSSL_add_all_ciphers(); - } + BIO_reset(bp); - pk = PEM_read_bio_PrivateKey(bp, NULL, NULL, (void*)passphrase); + pk = PEM_read_bio_PrivateKey(bp, NULL, NULL, (void *)passphrase); BIO_free(bp); - if (pk == NULL) { - return _libssh2_error(session, - LIBSSH2_ERROR_FILE, - "Unable to extract public key " - "from private key file: " - "Wrong passphrase or invalid/unrecognized " - "private key file format"); + if(pk == NULL) { + + /* Try OpenSSH format */ + rc = _libssh2_pub_priv_openssh_keyfile(session, + method, + method_len, + pubkeydata, pubkeydata_len, + privatekey, passphrase); + if(rc != 0) { + return _libssh2_error(session, + LIBSSH2_ERROR_FILE, + "Unable to extract public key " + "from private key file: " + "Wrong passphrase or invalid/unrecognized " + "private key file format"); + } + + return 0; } #ifdef HAVE_OPAQUE_STRUCTS @@ -1089,7 +3062,13 @@ _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, pktype = pk->type; #endif - switch (pktype) { + switch(pktype) { +#if LIBSSH2_ED25519 + case 
EVP_PKEY_ED25519 : + st = gen_publickey_from_ed_evp( + session, method, method_len, pubkeydata, pubkeydata_len, pk); + break; +#endif /* LIBSSH2_ED25519 */ case EVP_PKEY_RSA : st = gen_publickey_from_rsa_evp( session, method, method_len, pubkeydata, pubkeydata_len, pk); @@ -1102,6 +3081,13 @@ _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, break; #endif /* LIBSSH_DSA */ +#if LIBSSH2_ECDSA + case EVP_PKEY_EC : + st = gen_publickey_from_ec_evp( + session, method, method_len, pubkeydata, pubkeydata_len, pk); + break; +#endif + default : st = _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -1115,6 +3101,128 @@ _libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, return st; } +static int +_libssh2_pub_priv_openssh_keyfilememory(LIBSSH2_SESSION *session, + void **key_ctx, + const char *key_type, + unsigned char **method, + size_t *method_len, + unsigned char **pubkeydata, + size_t *pubkeydata_len, + const char *privatekeydata, + size_t privatekeydata_len, + unsigned const char *passphrase) +{ + int rc; + unsigned char *buf = NULL; + struct string_buf *decrypted = NULL; + + if(key_ctx != NULL) + *key_ctx = NULL; + + if(session == NULL) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Session is required"); + + if(key_type != NULL && (strlen(key_type) > 11 || strlen(key_type) < 7)) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "type is invalid"); + + _libssh2_init_if_needed(); + + rc = _libssh2_openssh_pem_parse_memory(session, passphrase, + privatekeydata, + privatekeydata_len, &decrypted); + + if(rc) + return rc; + + /* We have a new key file, now try and parse it using supported types */ + rc = _libssh2_get_string(decrypted, &buf, NULL); + + if(rc != 0 || buf == NULL) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Public key type in decrypted " + "key data not found"); + + rc = LIBSSH2_ERROR_FILE; + +#if LIBSSH2_ED25519 + if(strcmp("ssh-ed25519", (const char *)buf) == 0) { + if(key_type == NULL || strcmp("ssh-ed25519", key_type) == 0) { + 
rc = gen_publickey_from_ed25519_openssh_priv_data(session, + decrypted, + method, + method_len, + pubkeydata, + pubkeydata_len, + (libssh2_ed25519_ctx**)key_ctx); + } + } +#endif +#if LIBSSH2_RSA + if(strcmp("ssh-rsa", (const char *)buf) == 0) { + if(key_type == NULL || strcmp("ssh-rsa", key_type) == 0) { + rc = gen_publickey_from_rsa_openssh_priv_data(session, decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + (libssh2_rsa_ctx**)key_ctx); + } + } +#endif +#if LIBSSH2_DSA + if(strcmp("ssh-dss", (const char *)buf) == 0) { + if(key_type == NULL || strcmp("ssh-dss", key_type) == 0) { + rc = gen_publickey_from_dsa_openssh_priv_data(session, decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + (libssh2_dsa_ctx**)key_ctx); + } + } +#endif +#if LIBSSH2_ECDSA +{ + libssh2_curve_type type; + + if(_libssh2_ecdsa_curve_type_from_name((const char *)buf, &type) == 0) { + if(key_type == NULL || strcmp("ssh-ecdsa", key_type) == 0) { + rc = gen_publickey_from_ecdsa_openssh_priv_data(session, type, + decrypted, + method, method_len, + pubkeydata, + pubkeydata_len, + (libssh2_ecdsa_ctx**)key_ctx); + } + } +} +#endif + + if(rc == LIBSSH2_ERROR_FILE) + rc = _libssh2_error(session, LIBSSH2_ERROR_FILE, + "Unable to extract public key from private key file: " + "invalid/unrecognized private key file format"); + + if(decrypted) + _libssh2_string_buf_free(session, decrypted); + + return rc; +} + +int +read_openssh_private_key_from_memory(void **key_ctx, LIBSSH2_SESSION *session, + const char *key_type, + const char *filedata, + size_t filedata_len, + unsigned const char *passphrase) +{ + return _libssh2_pub_priv_openssh_keyfilememory(session, key_ctx, key_type, + NULL, NULL, NULL, NULL, + filedata, filedata_len, + passphrase); +} + int _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, unsigned char **method, @@ -1135,28 +3243,27 @@ _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, "Computing public key from private key."); bp = 
BIO_new_mem_buf((char *)privatekeydata, privatekeydata_len); - if (!bp) { - return -1; - } - if (!EVP_get_cipherbyname("des")) { - /* If this cipher isn't loaded it's a pretty good indication that none - * are. I have *NO DOUBT* that there's a better way to deal with this - * ($#&%#$(%$#( Someone buy me an OpenSSL manual and I'll read up on - * it. - */ - OpenSSL_add_all_ciphers(); - } + if(!bp) + return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory when" + "computing public key"); BIO_reset(bp); - pk = PEM_read_bio_PrivateKey(bp, NULL, NULL, (void*)passphrase); + pk = PEM_read_bio_PrivateKey(bp, NULL, NULL, (void *)passphrase); BIO_free(bp); - if (pk == NULL) { - return _libssh2_error(session, - LIBSSH2_ERROR_FILE, - "Unable to extract public key " - "from private key file: " - "Wrong passphrase or invalid/unrecognized " - "private key file format"); + if(pk == NULL) { + /* Try OpenSSH format */ + st = _libssh2_pub_priv_openssh_keyfilememory(session, NULL, NULL, + method, + method_len, + pubkeydata, + pubkeydata_len, + privatekeydata, + privatekeydata_len, + (unsigned const char *)passphrase); + if(st != 0) + return st; + return 0; } #ifdef HAVE_OPAQUE_STRUCTS @@ -1165,7 +3272,13 @@ _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, pktype = pk->type; #endif - switch (pktype) { + switch(pktype) { +#if LIBSSH2_ED25519 + case EVP_PKEY_ED25519 : + st = gen_publickey_from_ed_evp( + session, method, method_len, pubkeydata, pubkeydata_len, pk); + break; +#endif /* LIBSSH2_ED25519 */ case EVP_PKEY_RSA : st = gen_publickey_from_rsa_evp(session, method, method_len, pubkeydata, pubkeydata_len, pk); @@ -1176,6 +3289,12 @@ _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, pubkeydata, pubkeydata_len, pk); break; #endif /* LIBSSH_DSA */ +#if LIBSSH2_ECDSA + case EVP_PKEY_EC : + st = gen_publickey_from_ec_evp(session, method, method_len, + pubkeydata, pubkeydata_len, pk); + break; +#endif /* LIBSSH2_ECDSA */ default : st = 
_libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -1189,4 +3308,61 @@ _libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, return st; } +void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx) +{ + *dhctx = BN_new(); /* Random from client */ +} + +int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order, + _libssh2_bn_ctx *bnctx) +{ + /* Generate x and e */ + BN_rand(*dhctx, group_order * 8 - 1, 0, -1); + BN_mod_exp(public, g, *dhctx, p, bnctx); + return 0; +} + +int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p, + _libssh2_bn_ctx *bnctx) +{ + /* Compute the shared secret */ + BN_mod_exp(secret, f, *dhctx, p, bnctx); + return 0; +} + +void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx) +{ + BN_clear_free(*dhctx); + *dhctx = NULL; +} + +/* _libssh2_supported_key_sign_algorithms + * + * Return supported key hash algo upgrades, see crypto.h + * + */ + +const char * +_libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session, + unsigned char *key_method, + size_t key_method_len) +{ + (void)session; + +#if LIBSSH2_RSA_SHA2 + if(key_method_len == 7 && + memcmp(key_method, "ssh-rsa", key_method_len) == 0) { + return "rsa-sha2-512,rsa-sha2-256,ssh-rsa"; + } +#endif + + return NULL; +} + #endif /* LIBSSH2_OPENSSL */ diff --git a/vendor/libssh2/src/openssl.h b/vendor/libssh2/src/openssl.h index 3ca71fa8a4..2a002b41e5 100644 --- a/vendor/libssh2/src/openssl.h +++ b/vendor/libssh2/src/openssl.h @@ -1,3 +1,5 @@ +#ifndef __LIBSSH2_OPENSSL_H +#define __LIBSSH2_OPENSSL_H /* Copyright (C) 2009, 2010 Simon Josefsson * Copyright (C) 2006, 2007 The Written Word, Inc. All rights reserved. 
* @@ -40,7 +42,9 @@ #include #include #include +#ifndef OPENSSL_NO_ENGINE #include +#endif #ifndef OPENSSL_NO_DSA #include #endif @@ -60,8 +64,10 @@ #ifdef OPENSSL_NO_RSA # define LIBSSH2_RSA 0 +# define LIBSSH2_RSA_SHA2 0 #else # define LIBSSH2_RSA 1 +# define LIBSSH2_RSA_SHA2 1 #endif #ifdef OPENSSL_NO_DSA @@ -70,6 +76,20 @@ # define LIBSSH2_DSA 1 #endif +#ifdef OPENSSL_NO_ECDSA +# define LIBSSH2_ECDSA 0 +#else +# define LIBSSH2_ECDSA 1 +#endif + +#if OPENSSL_VERSION_NUMBER >= 0x10101000L && \ +!defined(LIBRESSL_VERSION_NUMBER) +# define LIBSSH2_ED25519 1 +#else +# define LIBSSH2_ED25519 0 +#endif + + #ifdef OPENSSL_NO_MD5 # define LIBSSH2_MD5 0 #else @@ -117,7 +137,9 @@ # define LIBSSH2_3DES 1 #endif -#define _libssh2_random(buf, len) RAND_bytes ((buf), (len)) +#define EC_MAX_POINT_LEN ((528 * 2 / 8) + 1) + +#define _libssh2_random(buf, len) (RAND_bytes((buf), (len)) == 1 ? 0 : -1) #define libssh2_prepare_iovec(vec, len) /* Empty. */ @@ -160,13 +182,62 @@ int _libssh2_sha256_init(libssh2_sha256_ctx *ctx); EVP_MD_CTX_free(ctx); \ } while(0) #else -#define libssh2_sha256_update(ctx, data, len) EVP_DigestUpdate(&(ctx), data, len) +#define libssh2_sha256_update(ctx, data, len) \ + EVP_DigestUpdate(&(ctx), data, len) #define libssh2_sha256_final(ctx, out) EVP_DigestFinal(&(ctx), out, NULL) #endif int _libssh2_sha256(const unsigned char *message, unsigned long len, unsigned char *out); #define libssh2_sha256(x,y,z) _libssh2_sha256(x,y,z) +#ifdef HAVE_OPAQUE_STRUCTS +#define libssh2_sha384_ctx EVP_MD_CTX * +#else +#define libssh2_sha384_ctx EVP_MD_CTX +#endif + +/* returns 0 in case of failure */ +int _libssh2_sha384_init(libssh2_sha384_ctx *ctx); +#define libssh2_sha384_init(x) _libssh2_sha384_init(x) +#ifdef HAVE_OPAQUE_STRUCTS +#define libssh2_sha384_update(ctx, data, len) EVP_DigestUpdate(ctx, data, len) +#define libssh2_sha384_final(ctx, out) do { \ + EVP_DigestFinal(ctx, out, NULL); \ + EVP_MD_CTX_free(ctx); \ + } while(0) +#else +#define 
libssh2_sha384_update(ctx, data, len) \ + EVP_DigestUpdate(&(ctx), data, len) +#define libssh2_sha384_final(ctx, out) EVP_DigestFinal(&(ctx), out, NULL) +#endif +int _libssh2_sha384(const unsigned char *message, unsigned long len, + unsigned char *out); +#define libssh2_sha384(x,y,z) _libssh2_sha384(x,y,z) + +#ifdef HAVE_OPAQUE_STRUCTS +#define libssh2_sha512_ctx EVP_MD_CTX * +#else +#define libssh2_sha512_ctx EVP_MD_CTX +#endif + +/* returns 0 in case of failure */ +int _libssh2_sha512_init(libssh2_sha512_ctx *ctx); +#define libssh2_sha512_init(x) _libssh2_sha512_init(x) +#ifdef HAVE_OPAQUE_STRUCTS +#define libssh2_sha512_update(ctx, data, len) EVP_DigestUpdate(ctx, data, len) +#define libssh2_sha512_final(ctx, out) do { \ + EVP_DigestFinal(ctx, out, NULL); \ + EVP_MD_CTX_free(ctx); \ + } while(0) +#else +#define libssh2_sha512_update(ctx, data, len) \ + EVP_DigestUpdate(&(ctx), data, len) +#define libssh2_sha512_final(ctx, out) EVP_DigestFinal(&(ctx), out, NULL) +#endif +int _libssh2_sha512(const unsigned char *message, unsigned long len, + unsigned char *out); +#define libssh2_sha512(x,y,z) _libssh2_sha512(x,y,z) + #ifdef HAVE_OPAQUE_STRUCTS #define libssh2_md5_ctx EVP_MD_CTX * #else @@ -226,12 +297,10 @@ int _libssh2_md5_init(libssh2_md5_ctx *ctx); #define libssh2_hmac_cleanup(ctx) HMAC_cleanup(ctx) #endif -#define libssh2_crypto_init() \ - OpenSSL_add_all_algorithms(); \ - ENGINE_load_builtin_engines(); \ - ENGINE_register_all_complete() - -#define libssh2_crypto_exit() +extern void _libssh2_openssl_crypto_init(void); +extern void _libssh2_openssl_crypto_exit(void); +#define libssh2_crypto_init() _libssh2_openssl_crypto_init() +#define libssh2_crypto_exit() _libssh2_openssl_crypto_exit() #define libssh2_rsa_ctx RSA @@ -239,9 +308,29 @@ int _libssh2_md5_init(libssh2_md5_ctx *ctx); #define libssh2_dsa_ctx DSA - #define _libssh2_dsa_free(dsactx) DSA_free(dsactx) +#if LIBSSH2_ECDSA +#define libssh2_ecdsa_ctx EC_KEY +#define _libssh2_ecdsa_free(ecdsactx) 
EC_KEY_free(ecdsactx) +#define _libssh2_ec_key EC_KEY + +typedef enum { + LIBSSH2_EC_CURVE_NISTP256 = NID_X9_62_prime256v1, + LIBSSH2_EC_CURVE_NISTP384 = NID_secp384r1, + LIBSSH2_EC_CURVE_NISTP521 = NID_secp521r1 +} +libssh2_curve_type; +#else +#define _libssh2_ec_key void +#endif /* LIBSSH2_ECDSA */ + +#if LIBSSH2_ED25519 +#define libssh2_ed25519_ctx EVP_PKEY + +#define _libssh2_ed25519_free(ctx) EVP_PKEY_free(ctx) +#endif /* ED25519 */ + #define _libssh2_cipher_type(name) const EVP_CIPHER *(*name)(void) #ifdef HAVE_OPAQUE_STRUCTS #define _libssh2_cipher_ctx EVP_CIPHER_CTX * @@ -267,7 +356,7 @@ int _libssh2_md5_init(libssh2_md5_ctx *ctx); #define _libssh2_cipher_3des EVP_des_ede3_cbc #ifdef HAVE_OPAQUE_STRUCTS -#define _libssh2_cipher_dtor(ctx) EVP_CIPHER_CTX_reset(*(ctx)) +#define _libssh2_cipher_dtor(ctx) EVP_CIPHER_CTX_free(*(ctx)) #else #define _libssh2_cipher_dtor(ctx) EVP_CIPHER_CTX_cleanup(ctx) #endif @@ -278,8 +367,6 @@ int _libssh2_md5_init(libssh2_md5_ctx *ctx); #define _libssh2_bn_ctx_free(bnctx) BN_CTX_free(bnctx) #define _libssh2_bn_init() BN_new() #define _libssh2_bn_init_from_bin() _libssh2_bn_init() -#define _libssh2_bn_rand(bn, bits, top, bottom) BN_rand(bn, bits, top, bottom) -#define _libssh2_bn_mod_exp(r, a, p, m, ctx) BN_mod_exp(r, a, p, m, ctx) #define _libssh2_bn_set_word(bn, val) BN_set_word(bn, val) #define _libssh2_bn_from_bin(bn, len, val) BN_bin2bn(val, len, bn) #define _libssh2_bn_to_bin(bn, val) BN_bn2bin(bn, val) @@ -287,7 +374,25 @@ int _libssh2_md5_init(libssh2_md5_ctx *ctx); #define _libssh2_bn_bits(bn) BN_num_bits(bn) #define _libssh2_bn_free(bn) BN_clear_free(bn) +#define _libssh2_dh_ctx BIGNUM * +#define libssh2_dh_init(dhctx) _libssh2_dh_init(dhctx) +#define libssh2_dh_key_pair(dhctx, public, g, p, group_order, bnctx) \ + _libssh2_dh_key_pair(dhctx, public, g, p, group_order, bnctx) +#define libssh2_dh_secret(dhctx, secret, f, p, bnctx) \ + _libssh2_dh_secret(dhctx, secret, f, p, bnctx) +#define libssh2_dh_dtor(dhctx) 
_libssh2_dh_dtor(dhctx) +extern void _libssh2_dh_init(_libssh2_dh_ctx *dhctx); +extern int _libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, + int group_order, + _libssh2_bn_ctx *bnctx); +extern int _libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p, + _libssh2_bn_ctx *bnctx); +extern void _libssh2_dh_dtor(_libssh2_dh_ctx *dhctx); + const EVP_CIPHER *_libssh2_EVP_aes_128_ctr(void); const EVP_CIPHER *_libssh2_EVP_aes_192_ctr(void); const EVP_CIPHER *_libssh2_EVP_aes_256_ctr(void); +#endif /* __LIBSSH2_OPENSSL_H */ diff --git a/vendor/libssh2/src/os400qc3.c b/vendor/libssh2/src/os400qc3.c deleted file mode 100644 index f8e46aba93..0000000000 --- a/vendor/libssh2/src/os400qc3.c +++ /dev/null @@ -1,2513 +0,0 @@ -/* - * Copyright (C) 2015 Patrick Monnerat, D+H - * All rights reserved. - * - * Redistribution and use in source and binary forms, - * with or without modification, are permitted provided - * that the following conditions are met: - * - * Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * Neither the name of the copyright holder nor the names - * of any other contributors may be used to endorse or - * promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. - */ - -#include "libssh2_priv.h" - -#ifdef LIBSSH2_OS400QC3 /* compile only if we build with OS/400 QC3 library */ - -#ifdef HAVE_STDLIB_H -#include -#endif - -#include -#include -#include -#include - -#include - - -#ifdef OS400_DEBUG -/* In debug mode, all system library errors cause an exception. */ -#define set_EC_length(ec, length) ((ec).Bytes_Provided = \ - (ec).Bytes_Available = 0) -#else -#define set_EC_length(ec, length) ((ec).Bytes_Provided = (length)) -#endif - - -/* Ensure va_list operations are not on an array. */ -typedef struct { - va_list list; -} valiststr; - - -typedef int (*loadkeyproc)(LIBSSH2_SESSION *session, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, void *loadkeydata); - -/* Public key extraction data. */ -typedef struct { - const char * method; - const unsigned char * data; - unsigned int length; -} loadpubkeydata; - - -/* Support for ASN.1 elements. */ - -typedef struct { - char * header; /* Pointer to header byte. */ - char * beg; /* Pointer to element data. */ - char * end; /* Pointer to 1st byte after element. */ - unsigned char class; /* ASN.1 element class. */ - unsigned char tag; /* ASN.1 element tag. */ - unsigned char constructed; /* Element is constructed. 
*/ -} asn1Element; - -#define ASN1_INTEGER 2 -#define ASN1_BIT_STRING 3 -#define ASN1_OCTET_STRING 4 -#define ASN1_NULL 5 -#define ASN1_OBJ_ID 6 -#define ASN1_SEQ 16 - -#define ASN1_CONSTRUCTED 0x20 - -/* rsaEncryption OID: 1.2.840.113549.1.1.1 */ -static unsigned char OID_rsaEncryption[] = - {9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 1, 1, 1}; -static int sshrsapubkey(LIBSSH2_SESSION *session, char **sshpubkey, - asn1Element *params, asn1Element *key, - const char *method); - -#if LIBSSH2_DSA != 0 -/* dsaEncryption OID: 1.2.840.10040.4.1 */ -static unsigned char OID_dsaEncryption[] = - {7, 40 + 2, 0x86, 0x48, 0xCE, 0x38, 4, 1}; -static int sshdsapubkey(LIBSSH2_SESSION *session, char **sshpubkey, - asn1Element *params, asn1Element *key, - const char *method); -#endif - - -/* PKCS#5 support. */ - -typedef struct pkcs5params pkcs5params; -struct pkcs5params { - int cipher; /* Encryption cipher. */ - int blocksize; /* Cipher block size. */ - char mode; /* Block encryption mode. */ - char padopt; /* Pad option. */ - char padchar; /* Pad character. */ - int (*kdf)(LIBSSH2_SESSION *session, char **dk, - const unsigned char * passphrase, pkcs5params *pkcs5); - int hash; /* KDF hash algorithm. */ - size_t hashlen; /* KDF hash digest length. */ - char * salt; /* Salt. */ - size_t saltlen; /* Salt length. */ - char * iv; /* Initialization vector. */ - size_t ivlen; /* Initialization vector length. */ - int itercount; /* KDF iteration count. */ - int dklen; /* Derived key length (#bytes). */ - int effkeysize; /* RC2 effective key size (#bits) or 0. */ -}; - -typedef struct pkcs5algo pkcs5algo; -struct pkcs5algo { - const unsigned char * oid; - int (*parse)(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); - int cipher; /* Encryption cipher. */ - size_t blocksize; /* Cipher block size. */ - char mode; /* Block encryption mode. */ - char padopt; /* Pad option. */ - char padchar; /* Pad character. */ - size_t keylen; /* Key length (#bytes). 
*/ - int hash; /* Hash algorithm. */ - size_t hashlen; /* Hash digest length. */ - size_t saltlen; /* Salt length. */ - size_t ivlen; /* Initialisation vector length. */ - int effkeysize; /* RC2 effective key size (#bits) or 0. */ -}; - -/* id-PBES2 OID: 1.2.840.113549.1.5.13 */ -static const unsigned char OID_id_PBES2[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x0D -}; -static int parse_pbes2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo PBES2 = { - OID_id_PBES2, parse_pbes2, 0, 0, '\0', '\0', '\0', 0, - 0, 0, 0, 0, 0 -}; - -/* id-PBKDF2 OID: 1.2.840.113549.1.5.12 */ -static const unsigned char OID_id_PBKDF2[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x0C -}; -static int parse_pbkdf2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo PBKDF2 = { - OID_id_PBKDF2, parse_pbkdf2, 0, 0, '\0', '\0', '\0', - SHA_DIGEST_LENGTH, Qc3_SHA1, SHA_DIGEST_LENGTH, 8, 8, 0 -}; - -/* id-hmacWithSHA1 OID: 1.2.840.113549.2.7 */ -static const unsigned char OID_id_hmacWithSHA1[] = { - 8, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x02, 0x07 -}; -static int parse_hmacWithSHA1(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo hmacWithSHA1 = { - OID_id_hmacWithSHA1, parse_hmacWithSHA1, 0, 0, '\0', '\0', '\0', - SHA_DIGEST_LENGTH, Qc3_SHA1, SHA_DIGEST_LENGTH, 8, 8, 0 -}; - -/* desCBC OID: 1.3.14.3.2.7 */ -static const unsigned char OID_desCBC[] = {5, 40 + 3, 0x0E, 0x03, 0x02, 0x07}; -static int parse_iv(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo desCBC = { - OID_desCBC, parse_iv, Qc3_DES, 8, Qc3_CBC, Qc3_Pad_Counter, - '\0', 8, 0, 0, 8, 8, 0 -}; - -/* des-EDE3-CBC OID: 1.2.840.113549.3.7 */ -static const unsigned char OID_des_EDE3_CBC[] = { - 8, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x03, 0x07 -}; -static const 
pkcs5algo des_EDE3_CBC = { - OID_des_EDE3_CBC, parse_iv, Qc3_TDES, 8, Qc3_CBC, Qc3_Pad_Counter, - '\0', 24, 0, 0, 8, 8, 0 -}; - -/* rc2CBC OID: 1.2.840.113549.3.2 */ -static const unsigned char OID_rc2CBC[] = { - 8, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x03, 0x02 -}; -static int parse_rc2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo rc2CBC = { - OID_rc2CBC, parse_rc2, Qc3_RC2, 8, Qc3_CBC, Qc3_Pad_Counter, - '\0', 0, 0, 0, 8, 0, 32 -}; - -/* pbeWithMD5AndDES-CBC OID: 1.2.840.113549.1.5.3 */ -static const unsigned char OID_pbeWithMD5AndDES_CBC[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x03 -}; -static int parse_pbes1(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param); -static const pkcs5algo pbeWithMD5AndDES_CBC = { - OID_pbeWithMD5AndDES_CBC, parse_pbes1, Qc3_DES, 8, Qc3_CBC, - Qc3_Pad_Counter, '\0', 8, Qc3_MD5, MD5_DIGEST_LENGTH, 8, 0, 0 -}; - -/* pbeWithMD5AndRC2-CBC OID: 1.2.840.113549.1.5.6 */ -static const unsigned char OID_pbeWithMD5AndRC2_CBC[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x06 -}; -static const pkcs5algo pbeWithMD5AndRC2_CBC = { - OID_pbeWithMD5AndRC2_CBC, parse_pbes1, Qc3_RC2, 8, Qc3_CBC, - Qc3_Pad_Counter, '\0', 0, Qc3_MD5, MD5_DIGEST_LENGTH, 8, 0, 64 -}; - -/* pbeWithSHA1AndDES-CBC OID: 1.2.840.113549.1.5.10 */ -static const unsigned char OID_pbeWithSHA1AndDES_CBC[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x0A -}; -static const pkcs5algo pbeWithSHA1AndDES_CBC = { - OID_pbeWithSHA1AndDES_CBC, parse_pbes1, Qc3_DES, 8, Qc3_CBC, - Qc3_Pad_Counter, '\0', 8, Qc3_SHA1, SHA_DIGEST_LENGTH, 8, 0, 0 -}; - -/* pbeWithSHA1AndRC2-CBC OID: 1.2.840.113549.1.5.11 */ -static const unsigned char OID_pbeWithSHA1AndRC2_CBC[] = { - 9, 40 + 2, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x05, 0x0B -}; -static const pkcs5algo pbeWithSHA1AndRC2_CBC = { - OID_pbeWithSHA1AndRC2_CBC, parse_pbes1, Qc3_RC2, 8, Qc3_CBC, - 
Qc3_Pad_Counter, '\0', 0, Qc3_SHA1, SHA_DIGEST_LENGTH, 8, 0, 64 -}; - -/* rc5-CBC-PAD OID: 1.2.840.113549.3.9: RC5 not implemented in Qc3. */ -/* pbeWithMD2AndDES-CBC OID: 1.2.840.113549.1.5.1: MD2 not implemented. */ -/* pbeWithMD2AndRC2-CBC OID: 1.2.840.113549.1.5.4: MD2 not implemented. */ - -static const pkcs5algo * pbestable[] = { - &pbeWithMD5AndDES_CBC, - &pbeWithMD5AndRC2_CBC, - &pbeWithSHA1AndDES_CBC, - &pbeWithSHA1AndRC2_CBC, - &PBES2, - NULL -}; - -static const pkcs5algo * pbkdf2table[] = { - &PBKDF2, - NULL -}; - -static const pkcs5algo * pbes2enctable[] = { - &desCBC, - &des_EDE3_CBC, - &rc2CBC, - NULL -}; - -static const pkcs5algo * kdf2prftable[] = { - &hmacWithSHA1, - NULL -}; - - -/* Public key extraction support. */ -static struct { - unsigned char * oid; - int (*sshpubkey)(LIBSSH2_SESSION *session, char **pubkey, - asn1Element *params, asn1Element *key, - const char *method); - const char * method; -} pka[] = { -#if LIBSSH2_RSA != 0 - { OID_rsaEncryption, sshrsapubkey, "ssh-rsa" }, -#endif -#if LIBSSH2_DSA != 0 - { OID_dsaEncryption, sshdsapubkey, "ssh-dss" }, -#endif - { NULL, NULL, NULL } -}; - -/* Define ASCII strings. */ -static const char beginencprivkeyhdr[] = - "-----BEGIN ENCRYPTED PRIVATE KEY-----"; -static const char endencprivkeyhdr[] = "-----END ENCRYPTED PRIVATE KEY-----"; -static const char beginprivkeyhdr[] = "-----BEGIN PRIVATE KEY-----"; -static const char endprivkeyhdr[] = "-----END PRIVATE KEY-----"; -static const char beginrsaprivkeyhdr[] = "-----BEGIN RSA PRIVATE KEY-----"; -static const char endrsaprivkeyhdr[] = "-----END RSA PRIVATE KEY-----"; -static const char fopenrmode[] = "r"; -static const char fopenrbmode[] = "rb"; - - -/* The rest of character literals in this module are in EBCDIC. 
*/ -#pragma convert(37) - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static Qc3_Format_KEYD0100_T nulltoken = {""}; - -static int zero = 0; -static int rsaprivate[] = { Qc3_RSA_Private }; -static char anycsp[] = { Qc3_Any_CSP }; -static char binstring[] = { Qc3_Bin_String }; -static char berstring[] = { Qc3_BER_String }; -static char qc3clear[] = { Qc3_Clear }; - -static const Qus_EC_t ecnull = {0}; /* Error causes an exception. */ - -static asn1Element lastbytebitcount = { - (char *) &zero, NULL, (char *) &zero + 1 -}; - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: ASN.1 support. - * - *******************************************************************/ - -static char * -getASN1Element(asn1Element *elem, char *beg, char *end) -{ - unsigned char b; - unsigned long len; - asn1Element lelem; - - /* Get a single ASN.1 element into `elem', parse ASN.1 string at `beg' - * ending at `end'. - * Returns a pointer in source string after the parsed element, or NULL - * if an error occurs. - */ - - if (beg >= end || !*beg) - return NULL; - - /* Process header byte. */ - elem->header = beg; - b = (unsigned char) *beg++; - elem->constructed = (b & 0x20) != 0; - elem->class = (b >> 6) & 3; - b &= 0x1F; - if (b == 0x1F) - return NULL; /* Long tag values not supported here. */ - elem->tag = b; - - /* Process length. */ - if (beg >= end) - return NULL; - b = (unsigned char) *beg++; - if (!(b & 0x80)) - len = b; - else if (!(b &= 0x7F)) { - /* Unspecified length. Since we have all the data, we can determine the - * effective length by skipping element until an end element is - * found. 
- */ - if (!elem->constructed) - return NULL; - elem->beg = beg; - while (beg < end && *beg) { - beg = getASN1Element(&lelem, beg, end); - if (!beg) - return NULL; - } - if (beg >= end) - return NULL; - elem->end = beg; - return beg + 1; - } else if (beg + b > end) - return NULL; /* Does not fit in source. */ - else { - /* Get long length. */ - len = 0; - do { - if (len & 0xFF000000L) - return NULL; /* Lengths > 32 bits are not supported. */ - len = (len << 8) | (unsigned char) *beg++; - } while (--b); - } - if ((unsigned long) (end - beg) < len) - return NULL; /* Element data does not fit in source. */ - elem->beg = beg; - elem->end = beg + len; - return elem->end; -} - -static asn1Element * -asn1_new(unsigned int type, unsigned int length) -{ - asn1Element *e; - unsigned int hdrl = 2; - unsigned int i; - unsigned char *buf; - - e = (asn1Element *) malloc(sizeof *e); - - if (e) { - if (length >= 0x80) - for (i = length; i; i >>= 8) - hdrl++; - - buf = (unsigned char *) malloc(hdrl + length); - - if (buf) { - e->header = buf; - e->beg = buf + hdrl; - e->end = e->beg + length; - e->class = (type >> 6) & 0x03; - e->tag = type & 0x1F; - e->constructed = (type >> 5) & 0x01; - e->header[0] = type; - - if (length < 0x80) - e->header[1] = length; - else { - e->header[1] = (hdrl - 2) | 0x80; - do { - e->header[--hdrl] = length; - length >>= 8; - } while (length); - } - } else { - free((char *) e); - e = NULL; - } - } - - return e; -} - -static asn1Element * -asn1_new_from_bytes(const unsigned char *data, unsigned int length) -{ - asn1Element *e; - asn1Element te; - - getASN1Element(&te, - (unsigned char *) data, (unsigned char *) data + length); - e = asn1_new(te.tag, te.end - te.beg); - - if (e) - memcpy(e->header, data, e->end - e->header); - - return e; -} - -static void -asn1delete(asn1Element *e) -{ - if (e) { - if (e->header) - free((char *) e->header); - free((char *) e); - } -} - -static asn1Element * -asn1uint(_libssh2_bn *bn) -{ - asn1Element *e; - int bits; - 
int length; - unsigned char * p; - - if (!bn) - return NULL; - - bits = _libssh2_bn_bits(bn); - length = (bits + 8) >> 3; - e = asn1_new(ASN1_INTEGER, length); - - if (e) { - p = e->beg; - if (!(bits & 0x07)) - *p++ = 0; - _libssh2_bn_to_bin(bn, p); - } - - return e; -} - -static asn1Element * -asn1containerv(unsigned int type, valiststr args) -{ - valiststr va; - asn1Element *e; - asn1Element *p; - unsigned char *bp; - unsigned int length = 0; - - memcpy((char *) &va, (char *) &args, sizeof args); - while ((p = va_arg(va.list, asn1Element *))) - length += p->end - p->header; - va_end(va.list); - e = asn1_new(type, length); - if (e) { - bp = e->beg; - while ((p = va_arg(args.list, asn1Element *))) { - memcpy(bp, p->header, p->end - p->header); - bp += p->end - p->header; - } - } - return e; -} - -/* VARARGS1 */ -static asn1Element * -asn1container(unsigned int type, ...) -{ - valiststr va; - asn1Element *e; - - va_start(va.list, type); - e = asn1containerv(type, va); - va_end(va.list); - return e; -} - -static asn1Element * -asn1bytes(unsigned int type, const unsigned char *bytes, unsigned int length) -{ - asn1Element *e; - - e = asn1_new(type, length); - if (e && length) - memcpy(e->beg, bytes, length); - return e; -} - -static asn1Element * -rsapublickey(_libssh2_bn *e, _libssh2_bn *m) -{ - asn1Element *publicexponent; - asn1Element *modulus; - asn1Element *rsapubkey; - - /* Build a PKCS#1 RSAPublicKey. 
*/ - - modulus = asn1uint(m); - publicexponent = asn1uint(e); - rsapubkey = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, - modulus, publicexponent, NULL); - asn1delete(modulus); - asn1delete(publicexponent); - - if (!modulus || !publicexponent) { - asn1delete(rsapubkey); - rsapubkey = NULL; - } - - return rsapubkey; -} - -static asn1Element * -rsaprivatekey(_libssh2_bn *e, _libssh2_bn *m, _libssh2_bn *d, - _libssh2_bn *p, _libssh2_bn *q, - _libssh2_bn *exp1, _libssh2_bn *exp2, _libssh2_bn *coeff) -{ - asn1Element *version; - asn1Element *modulus; - asn1Element *publicexponent; - asn1Element *privateexponent; - asn1Element *prime1; - asn1Element *prime2; - asn1Element *exponent1; - asn1Element *exponent2; - asn1Element *coefficient; - asn1Element *rsaprivkey; - - /* Build a PKCS#1 RSAPrivateKey. */ - version = asn1bytes(ASN1_INTEGER, "\0", 1); - modulus = asn1uint(m); - publicexponent = asn1uint(e); - privateexponent = asn1uint(d); - prime1 = asn1uint(p); - prime2 = asn1uint(q); - exponent1 = asn1uint(exp1); - exponent2 = asn1uint(exp2); - coefficient = asn1uint(coeff); - rsaprivkey = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, version, modulus, - publicexponent, privateexponent, prime1, prime2, - exponent1, exponent2, coefficient, NULL); - asn1delete(version); - asn1delete(modulus); - asn1delete(publicexponent); - asn1delete(privateexponent); - asn1delete(prime1); - asn1delete(prime2); - asn1delete(exponent1); - asn1delete(exponent2); - asn1delete(coefficient); - - if (!version || !modulus || !publicexponent || !privateexponent || - !prime1 || !prime2 || !exponent1 || !exponent2 || !coefficient) { - asn1delete(rsaprivkey); - rsaprivkey = NULL; - } - - return rsaprivkey; -} - -static asn1Element * -subjectpublickeyinfo(asn1Element *pubkey, const unsigned char *algo, - asn1Element *parameters) -{ - asn1Element *subjpubkey; - asn1Element *algorithm; - asn1Element *algorithmid; - asn1Element *subjpubkeyinfo; - unsigned int algosize = *algo++; - - algorithm = 
asn1bytes(ASN1_OBJ_ID, algo, algosize); - algorithmid = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, - algorithm, parameters, NULL); - subjpubkey = asn1container(ASN1_BIT_STRING, &lastbytebitcount, - pubkey, NULL); - subjpubkeyinfo = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, - algorithmid, subjpubkey, NULL); - asn1delete(algorithm); - asn1delete(algorithmid); - asn1delete(subjpubkey); - if (!algorithm || !algorithmid || !subjpubkey) { - asn1delete(subjpubkeyinfo); - subjpubkeyinfo = NULL; - } - return subjpubkeyinfo; -} - -static asn1Element * -rsasubjectpublickeyinfo(asn1Element *pubkey) -{ - asn1Element *parameters; - asn1Element *subjpubkeyinfo; - - parameters = asn1bytes(ASN1_NULL, NULL, 0); - subjpubkeyinfo = subjectpublickeyinfo(pubkey, - OID_rsaEncryption, parameters); - asn1delete(parameters); - if (!parameters) { - asn1delete(subjpubkeyinfo); - subjpubkeyinfo = NULL; - } - return subjpubkeyinfo; -} - -static asn1Element * -privatekeyinfo(asn1Element *privkey, const unsigned char *algo, - asn1Element *parameters) -{ - asn1Element *version; - asn1Element *privatekey; - asn1Element *algorithm; - asn1Element *privatekeyalgorithm; - asn1Element *privkeyinfo; - unsigned int algosize = *algo++; - - /* Build a PKCS#8 PrivateKeyInfo. 
*/ - version = asn1bytes(ASN1_INTEGER, "\0", 1); - algorithm = asn1bytes(ASN1_OBJ_ID, algo, algosize); - privatekeyalgorithm = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, - algorithm, parameters, NULL); - privatekey = asn1container(ASN1_OCTET_STRING, privkey, NULL); - privkeyinfo = asn1container(ASN1_SEQ | ASN1_CONSTRUCTED, version, - privatekeyalgorithm, privatekey, NULL); - asn1delete(version); - asn1delete(algorithm); - asn1delete(privatekeyalgorithm); - if (!version || !algorithm || !privatekeyalgorithm) { - asn1delete(privkeyinfo); - privkeyinfo = NULL; - } - return privkeyinfo; -} - -static asn1Element * -rsaprivatekeyinfo(asn1Element *privkey) -{ - asn1Element *parameters; - asn1Element *privkeyinfo; - - parameters = asn1bytes(ASN1_NULL, NULL, 0); - privkeyinfo = privatekeyinfo(privkey, OID_rsaEncryption, parameters); - asn1delete(parameters); - if (!parameters) { - asn1delete(privkeyinfo); - privkeyinfo = NULL; - } - return privkeyinfo; -} - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: big numbers support. 
- * - *******************************************************************/ - - -_libssh2_bn * -_libssh2_bn_init(void) -{ - _libssh2_bn *bignum; - - bignum = (_libssh2_bn *) malloc(sizeof *bignum); - if (bignum) { - bignum->bignum = NULL; - bignum->length = 0; - } - - return bignum; -} - -void -_libssh2_bn_free(_libssh2_bn *bn) -{ - if (bn) { - if (bn->bignum) { -#ifdef LIBSSH2_CLEAR_MEMORY - if (bn->length) - memset((char *) bn->bignum, 0, bn->length); -#endif - free(bn->bignum); - } - - free((char *) bn); - } -} - -static int -_libssh2_bn_resize(_libssh2_bn *bn, size_t newlen) -{ - unsigned char *bignum; - - if (!bn) - return -1; - if (newlen == bn->length) - return 0; - - if (!bn->bignum) - bignum = (unsigned char *) malloc(newlen); - else { -#ifdef LIBSSH2_CLEAR_MEMORY - if (newlen < bn->length) - memset((char *) bn->bignum + newlen, 0, bn->length - newlen); -#endif - if (!newlen) { - free((char *) bn->bignum); - bn->bignum = NULL; - bn->length = 0; - return 0; - } - bignum = (unsigned char *) realloc((char *) bn->bignum, newlen); - } - - if (!bignum) - return -1; - - if (newlen > bn->length) - memset((char *) bignum + bn->length, 0, newlen - bn->length); - - bn->bignum = bignum; - bn->length = newlen; - return 0; -} - -unsigned long -_libssh2_bn_bits(_libssh2_bn *bn) -{ - unsigned int i; - unsigned char b; - - if (bn && bn->bignum) { - for (i = bn->length; i--;) - if ((b = bn->bignum[i])) { - i *= 8; - do { - i++; - } while (b >>= 1); - return i; - } - } - - return 0; -} - -int -_libssh2_bn_from_bin(_libssh2_bn *bn, int len, const unsigned char *val) -{ - int i; - - if (!bn || (len && !val)) - return -1; - - for (; len && !*val; len--) - val++; - - if (_libssh2_bn_resize(bn, len)) - return -1; - - for (i = len; i--;) - bn->bignum[i] = *val++; - - return 0; -} - -int -_libssh2_bn_set_word(_libssh2_bn *bn, unsigned long val) -{ - val = htonl(val); - return _libssh2_bn_from_bin(bn, sizeof val, (unsigned char *) &val); -} - -int -_libssh2_bn_to_bin(_libssh2_bn *bn, 
unsigned char *val) -{ - int i; - - if (!bn || !val) - return -1; - - for (i = bn->length; i--;) - *val++ = bn->bignum[i]; - - return 0; -} - -static int -_libssh2_bn_from_bn(_libssh2_bn *to, _libssh2_bn *from) -{ - int i; - - if (!to || !from) - return -1; - - if (_libssh2_bn_resize(to, from->length)) - return -1; - - for (i = to->length; i--;) - to->bignum[i] = from->bignum[i]; - - return 0; -} - -void -_libssh2_random(unsigned char *buf, int len) -{ - Qc3GenPRNs(buf, len, - Qc3PRN_TYPE_NORMAL, Qc3PRN_NO_PARITY, (char *) &ecnull); -} - -int -_libssh2_bn_rand(_libssh2_bn *bn, int bits, int top, int bottom) -{ - int len; - int i; - - if (!bn || bits <= 0) - return -1; - len = (bits + 7) >> 3; - if (_libssh2_bn_resize(bn, len)) - return -1; - _libssh2_random(bn->bignum, len); - i = ((bits - 1) & 07) + 1; - bn->bignum[len - 1] &= (1 << i) - 1; - switch (top) { - case 1: - if (bits > 1) - if (i > 1) - bn->bignum[len - 1] |= 1 << (i - 2); - else - bn->bignum[len - 2] |= 0x80; - /* Fall into. 
*/ - case 0: - bn->bignum[len - 1] |= 1 << (i - 1); - break; - } - if (bottom) - *bn->bignum |= 0x01; - return 0; -} - -static int -_libssh2_bn_lshift(_libssh2_bn *bn) -{ - int i; - int c = 0; - - if (!bn) - return -1; - - if (_libssh2_bn_resize(bn, (_libssh2_bn_bits(bn) + 8) >> 3)) - return -1; - - for (i = 0; i < bn->length; i++) { - if (bn->bignum[i] & 0x80) - c |= 0x02; - bn->bignum[i] = (bn->bignum[i] << 1) | (c & 0x01); - c >>= 1; - } - - return 0; -} - -static int -_libssh2_bn_rshift(_libssh2_bn *bn) -{ - int i; - int c = 0; - - if (!bn) - return -1; - - for (i = bn->length; i--;) { - if (bn->bignum[i] & 0x01) - c |= 0x100; - bn->bignum[i] = (bn->bignum[i] >> 1) | (c & 0x80); - c >>= 1; - } - - if (_libssh2_bn_resize(bn, (_libssh2_bn_bits(bn) + 7) >> 3)) - return -1; - - return 0; -} - -static void -_libssh2_bn_swap(_libssh2_bn *bn1, _libssh2_bn *bn2) -{ - _libssh2_bn t = *bn1; - - *bn1 = *bn2; - *bn2 = t; -} - -static int -_libssh2_bn_subtract(_libssh2_bn *d, _libssh2_bn *bn1, _libssh2_bn *bn2) -{ - int c = 0; - int i; - - if (bn1->length < bn2->length) - return -1; - - if (_libssh2_bn_resize(d, bn1->length)) - return -1; - - for (i = 0; i < bn2->length; i++) { - c += (int) bn1->bignum[i] - (int) bn2->bignum[i]; - d->bignum[i] = c; - c = c < 0? -1: 0; - } - - for (; c && i < bn1->length; i++) { - c += (int) bn1->bignum[i]; - d->bignum[i] = c; - c = c < 0? -1: 0; - } - - if (_libssh2_bn_resize(d, (_libssh2_bn_bits(d) + 7) >> 3)) - return -1; - - return c; -} - -int -_libssh2_os400qc3_bn_mod_exp(_libssh2_bn *r, _libssh2_bn *a, _libssh2_bn *p, - _libssh2_bn *m) -{ - _libssh2_bn *mp; - _libssh2_bn *rp; - asn1Element *rsapubkey; - asn1Element *subjpubkeyinfo; - unsigned char *av; - unsigned char *rv; - char *keydbuf; - Qc3_Format_ALGD0400_T algd; - Qc3_Format_KEYD0200_T *keyd; - Qus_EC_t errcode; - int sc; - int outlen; - int ret = -1; - - /* There is no support for this function in the Qc3 crypto-library. 
- Since a RSA encryption performs this function, we can emulate it - by creating an RSA public key in ASN.1 SubjectPublicKeyInfo format - from p (exponent) and m (modulus) and encrypt a with this key. The - encryption output is the function result. - Problem: the Qc3EncryptData procedure only succeeds if the data bit - count is less than the modulus bit count. To satisfy this condition, - we multiply the modulus by a power of two and adjust the result - accordingly. */ - - if (!r || !a || !p) - return ret; - - mp = _libssh2_bn_init(); - if (!mp) - return ret; - if (_libssh2_bn_from_bn(mp, m)) { - _libssh2_bn_free(mp); - return ret; - } - for (sc = 0; _libssh2_bn_bits(mp) <= 8 * a->length; sc++) - if (_libssh2_bn_lshift(mp)) { - _libssh2_bn_free(mp); - return ret; - } - - rsapubkey = rsapublickey(p, mp); - subjpubkeyinfo = rsasubjectpublickeyinfo(rsapubkey); - asn1delete(rsapubkey); - - if (!rsapubkey || !subjpubkeyinfo) { - asn1delete(rsapubkey); - asn1delete(subjpubkeyinfo); - _libssh2_bn_free(mp); - return ret; - } - - av = (unsigned char *) alloca(a->length); - rv = (unsigned char *) alloca(mp->length); - keydbuf = alloca(sizeof *keyd + - subjpubkeyinfo->end - subjpubkeyinfo->header); - - if (av && rv && keydbuf) { - _libssh2_bn_to_bin(a, av); - algd.Public_Key_Alg = Qc3_RSA; - algd.PKA_Block_Format = Qc3_Zero_Pad; - memset(algd.Reserved, 0, sizeof algd.Reserved); - algd.Signing_Hash_Alg = 0; - keyd = (Qc3_Format_KEYD0200_T *) keydbuf; - keyd->Key_Type = Qc3_RSA_Public; - keyd->Key_String_Len = subjpubkeyinfo->end - subjpubkeyinfo->header; - keyd->Key_Format = Qc3_BER_String; - memset(keyd->Reserved, 0, sizeof keyd->Reserved); - memcpy(keydbuf + sizeof *keyd, subjpubkeyinfo->header, - keyd->Key_String_Len); - set_EC_length(errcode, sizeof errcode); - Qc3EncryptData(av, (int *) &a->length, Qc3_Data, (char *) &algd, - Qc3_Alg_Public_Key, keydbuf, Qc3_Key_Parms, anycsp, - NULL, rv, (int *) &mp->length, &outlen, &errcode); - if (!errcode.Bytes_Available) { - 
_libssh2_bn_from_bin(r, outlen, rv); - if (!sc) - ret = 0; - else { - rp = _libssh2_bn_init(); - if (rp) { - do { - _libssh2_bn_rshift(mp); - if (!_libssh2_bn_subtract(rp, r, mp)) - _libssh2_bn_swap(r, rp); - } while (--sc); - _libssh2_bn_free(rp); - ret = 0; - } - } - } - } - asn1delete(subjpubkeyinfo); - _libssh2_bn_free(mp); - return ret; -} - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: crypto context support. - * - *******************************************************************/ - -static _libssh2_os400qc3_crypto_ctx * -libssh2_init_crypto_ctx(_libssh2_os400qc3_crypto_ctx *ctx) -{ - if (!ctx) - ctx = (_libssh2_os400qc3_crypto_ctx *) malloc(sizeof *ctx); - - if (ctx) { - memset((char *) ctx, 0, sizeof *ctx); - ctx->hash.Final_Op_Flag = Qc3_Continue; - } - - return ctx; -} - -static int -null_token(const char *token) -{ - return !memcmp(token, nulltoken.Key_Context_Token, - sizeof nulltoken.Key_Context_Token); -} - -void -_libssh2_os400qc3_crypto_dtor(_libssh2_os400qc3_crypto_ctx *x) -{ - if (!x) - return; - if (!null_token(x->hash.Alg_Context_Token)) { - Qc3DestroyAlgorithmContext(x->hash.Alg_Context_Token, (char *) &ecnull); - memset(x->hash.Alg_Context_Token, 0, sizeof x->hash.Alg_Context_Token); - } - if (!null_token(x->key.Key_Context_Token)) { - Qc3DestroyKeyContext(x->key.Key_Context_Token, (char *) &ecnull); - memset(x->key.Key_Context_Token, 0, sizeof x->key.Key_Context_Token); - } - if (x->kek) { - _libssh2_os400qc3_crypto_dtor(x->kek); - free((char *) x->kek); - x->kek = NULL; - } -} - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: hash algorithms support. 
- * - *******************************************************************/ - -int -libssh2_os400qc3_hash_init(Qc3_Format_ALGD0100_T *x, unsigned int algorithm) -{ - Qc3_Format_ALGD0500_T algd; - Qus_EC_t errcode; - - if (!x) - return 0; - - memset((char *) x, 0, sizeof *x); - x->Final_Op_Flag = Qc3_Continue; - algd.Hash_Alg = algorithm; - set_EC_length(errcode, sizeof errcode); - Qc3CreateAlgorithmContext((char *) &algd, Qc3_Alg_Hash, - x->Alg_Context_Token, &errcode); - return errcode.Bytes_Available? 0: 1; -} - -void -libssh2_os400qc3_hash_update(Qc3_Format_ALGD0100_T *ctx, - unsigned char *data, int len) -{ - char dummy[64]; - - ctx->Final_Op_Flag = Qc3_Continue; - Qc3CalculateHash((char *) data, &len, Qc3_Data, (char *) ctx, - Qc3_Alg_Token, anycsp, NULL, dummy, (char *) &ecnull); -} - -void -libssh2_os400qc3_hash_final(Qc3_Format_ALGD0100_T *ctx, unsigned char *out) -{ - char data; - - ctx->Final_Op_Flag = Qc3_Final; - Qc3CalculateHash(&data, &zero, Qc3_Data, (char *) ctx, Qc3_Alg_Token, - anycsp, NULL, (char *) out, (char *) &ecnull); - Qc3DestroyAlgorithmContext(ctx->Alg_Context_Token, (char *) &ecnull); - memset(ctx->Alg_Context_Token, 0, sizeof ctx->Alg_Context_Token); -} - -int -libssh2_os400qc3_hash(const unsigned char *message, unsigned long len, - unsigned char *out, unsigned int algo) -{ - Qc3_Format_ALGD0100_T ctx; - - if (!libssh2_os400qc3_hash_init(&ctx, algo)) - return 1; - - libssh2_os400qc3_hash_update(&ctx, (unsigned char *) message, len); - libssh2_os400qc3_hash_final(&ctx, out); - return 0; -} - -void -libssh2_os400qc3_hmac_init(_libssh2_os400qc3_crypto_ctx *ctx, - int algo, size_t minkeylen, void *key, int keylen) -{ - if (keylen < minkeylen) { - char *lkey = alloca(minkeylen); - - /* Pad key with zeroes if too short. 
*/ - if (!lkey) - return; - memcpy(lkey, (char *) key, keylen); - memset(lkey + keylen, 0, minkeylen - keylen); - key = (void *) lkey; - keylen = minkeylen; - } - libssh2_os400qc3_hash_init(&ctx->hash, algo); - Qc3CreateKeyContext((char *) key, &keylen, binstring, &algo, qc3clear, - NULL, NULL, ctx->key.Key_Context_Token, - (char *) &ecnull); -} - -void -libssh2_os400qc3_hmac_update(_libssh2_os400qc3_crypto_ctx *ctx, - unsigned char *data, int len) -{ - char dummy[64]; - - ctx->hash.Final_Op_Flag = Qc3_Continue; - Qc3CalculateHMAC((char *) data, &len, Qc3_Data, (char *) &ctx->hash, - Qc3_Alg_Token, ctx->key.Key_Context_Token, Qc3_Key_Token, - anycsp, NULL, dummy, (char *) &ecnull); -} - -void -libssh2_os400qc3_hmac_final(_libssh2_os400qc3_crypto_ctx *ctx, - unsigned char *out) -{ - char data; - - ctx->hash.Final_Op_Flag = Qc3_Final; - Qc3CalculateHMAC((char *) data, &zero, Qc3_Data, (char *) &ctx->hash, - Qc3_Alg_Token, ctx->key.Key_Context_Token, Qc3_Key_Token, - anycsp, NULL, (char *) out, (char *) &ecnull); -} - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: cipher algorithms support. 
- * - *******************************************************************/ - -int -_libssh2_cipher_init(_libssh2_cipher_ctx *h, _libssh2_cipher_type(algo), - unsigned char *iv, unsigned char *secret, int encrypt) -{ - Qc3_Format_ALGD0200_T algd; - Qus_EC_t errcode; - - (void) encrypt; - - if (!h) - return -1; - - libssh2_init_crypto_ctx(h); - algd.Block_Cipher_Alg = algo.algo; - algd.Block_Length = algo.size; - algd.Mode = algo.mode; - algd.Pad_Option = Qc3_No_Pad; - algd.Pad_Character = 0; - algd.Reserved = 0; - algd.MAC_Length = 0; - algd.Effective_Key_Size = 0; - memset(algd.Init_Vector, 0 , sizeof algd.Init_Vector); - if (algo.mode != Qc3_ECB && algo.size) - memcpy(algd.Init_Vector, iv, algo.size); - set_EC_length(errcode, sizeof errcode); - Qc3CreateAlgorithmContext((char *) &algd, algo.fmt, - h->hash.Alg_Context_Token, &errcode); - if (errcode.Bytes_Available) - return -1; - Qc3CreateKeyContext((char *) secret, &algo.keylen, binstring, - &algo.algo, qc3clear, NULL, NULL, - h->key.Key_Context_Token, (char *) &errcode); - if (errcode.Bytes_Available) { - _libssh2_os400qc3_crypto_dtor(h); - return -1; - } - - return 0; -} - -int -_libssh2_cipher_crypt(_libssh2_cipher_ctx *ctx, - _libssh2_cipher_type(algo), - int encrypt, unsigned char *block, size_t blocksize) -{ - Qus_EC_t errcode; - int outlen; - int blksize = blocksize; - - (void) algo; - - set_EC_length(errcode, sizeof errcode); - if (encrypt) - Qc3EncryptData((char *) block, &blksize, Qc3_Data, - ctx->hash.Alg_Context_Token, Qc3_Alg_Token, - ctx->key.Key_Context_Token, Qc3_Key_Token, anycsp, NULL, - (char *) block, &blksize, &outlen, (char *) &errcode); - else - Qc3DecryptData((char *) block, &blksize, - ctx->hash.Alg_Context_Token, Qc3_Alg_Token, - ctx->key.Key_Context_Token, Qc3_Key_Token, anycsp, NULL, - (char *) block, &blksize, &outlen, (char *) &errcode); - - return errcode.Bytes_Available? 
-1: 0; -} - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: RSA support. - * - *******************************************************************/ - -int -_libssh2_rsa_new(libssh2_rsa_ctx **rsa, - const unsigned char *edata, unsigned long elen, - const unsigned char *ndata, unsigned long nlen, - const unsigned char *ddata, unsigned long dlen, - const unsigned char *pdata, unsigned long plen, - const unsigned char *qdata, unsigned long qlen, - const unsigned char *e1data, unsigned long e1len, - const unsigned char *e2data, unsigned long e2len, - const unsigned char *coeffdata, unsigned long coefflen) -{ - libssh2_rsa_ctx *ctx; - _libssh2_bn *e = _libssh2_bn_init_from_bin(); - _libssh2_bn *n = _libssh2_bn_init_from_bin(); - _libssh2_bn *d = NULL; - _libssh2_bn *p = NULL; - _libssh2_bn *q = NULL; - _libssh2_bn *e1 = NULL; - _libssh2_bn *e2 = NULL; - _libssh2_bn *coeff = NULL; - asn1Element *key = NULL; - asn1Element *structkey = NULL; - Qc3_Format_ALGD0400_T algd; - Qus_EC_t errcode; - int keytype; - int ret = 0; - int i; - - ctx = libssh2_init_crypto_ctx(NULL); - if (!ctx) - ret = -1; - if (!ret) { - _libssh2_bn_from_bin(e, elen, edata); - _libssh2_bn_from_bin(n, nlen, ndata); - if (!e || !n) - ret = -1; - } - if (!ret && ddata) { - /* Private key. */ - d = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(d, dlen, ddata); - p = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(p, plen, pdata); - q = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(q, qlen, qdata); - e1 = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(e1, e1len, e1data); - e2 = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(e2, e2len, e2data); - coeff = _libssh2_bn_init_from_bin(); - _libssh2_bn_from_bin(coeff, coefflen, coeffdata); - if (!d || !p || !q ||!e1 || !e2 || !coeff) - ret = -1; - - if (!ret) { - /* Build a PKCS#8 private key. 
*/ - key = rsaprivatekey(e, n, d, p, q, e1, e2, coeff); - structkey = rsaprivatekeyinfo(key); - } - keytype = Qc3_RSA_Private; - } else if (!ret) { - key = rsapublickey(e, n); - structkey = rsasubjectpublickeyinfo(key); - keytype = Qc3_RSA_Public; - } - if (!key || !structkey) - ret = -1; - - set_EC_length(errcode, sizeof errcode); - - if (!ret) { - /* Create the algorithm context. */ - algd.Public_Key_Alg = Qc3_RSA; - algd.PKA_Block_Format = Qc3_PKCS1_01; - memset(algd.Reserved, 0, sizeof algd.Reserved); - algd.Signing_Hash_Alg = Qc3_SHA1; - Qc3CreateAlgorithmContext((char *) &algd, Qc3_Alg_Public_Key, - ctx->hash.Alg_Context_Token, &errcode); - if (errcode.Bytes_Available) - ret = -1; - ctx->hash.Final_Op_Flag = Qc3_Continue; - } - - /* Create the key context. */ - if (!ret) { - i = structkey->end - structkey->header; - Qc3CreateKeyContext(structkey->header, &i, berstring, &keytype, - qc3clear, NULL, NULL, ctx->key.Key_Context_Token, - (char *) &errcode); - if (errcode.Bytes_Available) - ret = -1; - } - - _libssh2_bn_free(e); - _libssh2_bn_free(n); - _libssh2_bn_free(d); - _libssh2_bn_free(p); - _libssh2_bn_free(q); - _libssh2_bn_free(e1); - _libssh2_bn_free(e2); - _libssh2_bn_free(coeff); - asn1delete(key); - asn1delete(structkey); - if (ret && ctx) { - _libssh2_rsa_free(ctx); - ctx = NULL; - } - *rsa = ctx; - return ret; -} - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: PKCS#5 supplement. 
- * - *******************************************************************/ - -static int -oidcmp(const asn1Element *e, const unsigned char *oid) -{ - int i = e->end - e->beg - *oid++; - - if (*e->header != ASN1_OBJ_ID) - return -2; - if (!i) - i = memcmp(e->beg, oid, oid[-1]); - return i; -} - -static int -asn1getword(asn1Element *e, unsigned long *v) -{ - unsigned long a; - const unsigned char *cp; - - if (*e->header != ASN1_INTEGER) - return -1; - for (cp = e->beg; cp < e->end && !*cp; cp++) - ; - if (e->end - cp > sizeof a) - return -1; - for (a = 0; cp < e->end; cp++) - a = (a << 8) | *cp; - *v = a; - return 0; -} - -static int -pbkdf1(LIBSSH2_SESSION *session, char **dk, const unsigned char * passphrase, - pkcs5params *pkcs5) -{ - int i; - Qc3_Format_ALGD0100_T hctx; - int len = pkcs5->saltlen; - char *data = (char *) pkcs5->salt; - - *dk = NULL; - if (pkcs5->dklen > pkcs5->hashlen) - return -1; - - /* Allocate the derived key buffer. */ - if (!(*dk = LIBSSH2_ALLOC(session, pkcs5->hashlen))) - return -1; - - /* Initial hash. */ - libssh2_os400qc3_hash_init(&hctx, pkcs5->hash); - libssh2_os400qc3_hash_update(&hctx, (unsigned char *) passphrase, - strlen(passphrase)); - hctx.Final_Op_Flag = Qc3_Final; - Qc3CalculateHash((char *) pkcs5->salt, &len, Qc3_Data, (char *) &hctx, - Qc3_Alg_Token, anycsp, NULL, *dk, (char *) &ecnull); - - /* Iterate. */ - len = pkcs5->hashlen; - for (i = 1; i < pkcs5->itercount; i++) - Qc3CalculateHash((char *) *dk, &len, Qc3_Data, (char *) &hctx, - Qc3_Alg_Token, anycsp, NULL, *dk, (char *) &ecnull); - - /* Special stuff for PBES1: split derived key into 8-byte key and 8-byte - initialization vector. */ - pkcs5->dklen = 8; - pkcs5->ivlen = 8; - pkcs5->iv = *dk + 8; - - /* Clean-up and exit. 
*/ - Qc3DestroyAlgorithmContext(hctx.Alg_Context_Token, (char *) &ecnull); - return 0; -} - -static int -pbkdf2(LIBSSH2_SESSION *session, char **dk, const unsigned char * passphrase, - pkcs5params *pkcs5) -{ - size_t i; - size_t k; - int j; - int l; - uint32_t ni; - unsigned long long t; - char *mac; - char *buf; - _libssh2_os400qc3_crypto_ctx hctx; - - *dk = NULL; - t = ((unsigned long long) pkcs5->dklen + pkcs5->hashlen - 1) / - pkcs5->hashlen; - if (t > 0xFFFFFFFF) - return -1; - mac = alloca(pkcs5->hashlen); - if (!mac) - return -1; - - /* Allocate the derived key buffer. */ - l = t; - if (!(buf = LIBSSH2_ALLOC(session, l * pkcs5->hashlen))) - return -1; - *dk = buf; - - /* Create an HMAC context for our computations. */ - libssh2_os400qc3_hmac_init(&hctx, pkcs5->hash, pkcs5->hashlen, - (void *) passphrase, strlen(passphrase)); - - /* Process each hLen-size blocks. */ - for (i = 1; i <= l; i++) { - ni = htonl(i); - libssh2_os400qc3_hmac_update(&hctx, pkcs5->salt, pkcs5->saltlen); - libssh2_os400qc3_hmac_update(&hctx, (char *) &ni, sizeof ni); - libssh2_os400qc3_hmac_final(&hctx, mac); - memcpy(buf, mac, pkcs5->hashlen); - for (j = 1; j < pkcs5->itercount; j++) { - libssh2_os400qc3_hmac_update(&hctx, mac, pkcs5->hashlen); - libssh2_os400qc3_hmac_final(&hctx, mac); - for (k = 0; k < pkcs5->hashlen; k++) - buf[k] ^= mac[k]; - } - buf += pkcs5->hashlen; - } - - /* Computation done. Release HMAC context. 
*/ - _libssh2_os400qc3_crypto_dtor(&hctx); - return 0; -} - -static int -parse_pkcs5_algorithm(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - asn1Element *algid, pkcs5algo **algotable) -{ - asn1Element oid; - asn1Element param; - char *cp; - - cp = getASN1Element(&oid, algid->beg, algid->end); - if (!cp || *oid.header != ASN1_OBJ_ID) - return -1; - param.header = NULL; - if (cp < algid->end) - cp = getASN1Element(¶m, cp, algid->end); - if (cp != algid->end) - return -1; - for (; *algotable; algotable++) - if (!oidcmp(&oid, (*algotable)->oid)) - return (*(*algotable)->parse)(session, pkcs5, *algotable, - param.header? ¶m: NULL); - return -1; -} - -static int -parse_pbes2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - asn1Element keyDerivationFunc; - asn1Element encryptionScheme; - char *cp; - - if (!param || *param->header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - cp = getASN1Element(&keyDerivationFunc, param->beg, param->end); - if (!cp || *keyDerivationFunc.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - if (getASN1Element(&encryptionScheme, cp, param->end) != param->end || - *encryptionScheme.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - if (parse_pkcs5_algorithm(session, pkcs5, &encryptionScheme, pbes2enctable)) - return -1; - if (parse_pkcs5_algorithm(session, pkcs5, &keyDerivationFunc, pbkdf2table)) - return -1; - return 0; -} - -static int -parse_pbkdf2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - asn1Element salt; - asn1Element iterationCount; - asn1Element keyLength; - asn1Element prf; - unsigned long itercount; - char *cp; - - if (!param || *param->header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - cp = getASN1Element(&salt, param->beg, param->end); - /* otherSource not supported. 
*/ - if (!cp || *salt.header != ASN1_OCTET_STRING) - return -1; - cp = getASN1Element(&iterationCount, cp, param->end); - if (!cp || *iterationCount.header != ASN1_INTEGER) - return -1; - keyLength.header = prf.header = NULL; - if (cp < param->end) { - cp = getASN1Element(&prf, cp, param->end); - if (!cp) - return -1; - if (*prf.header == ASN1_INTEGER) { - keyLength = prf; - prf.header = NULL; - if (cp < param->end) - cp = getASN1Element(&prf, cp, param->end); - } - if (cp != param->end) - return -1; - } - pkcs5->hash = algo->hash; - pkcs5->hashlen = algo->hashlen; - if (prf.header) { - if (*prf.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - if (parse_pkcs5_algorithm(session, pkcs5, &prf, kdf2prftable)) - return -1; - } - pkcs5->saltlen = salt.end - salt.beg; - pkcs5->salt = salt.beg; - if (asn1getword(&iterationCount, &itercount) || - !itercount || itercount > 100000) - return -1; - pkcs5->itercount = itercount; - pkcs5->kdf = pbkdf2; - return 0; -} - -static int -parse_hmacWithSHA1(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - if (!param || *param->header != ASN1_NULL) - return -1; - pkcs5->hash = algo->hash; - pkcs5->hashlen = algo->hashlen; - return 0; -} - -static int -parse_iv(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - if (!param || *param->header != ASN1_OCTET_STRING || - param->end - param->beg != algo->ivlen) - return -1; - pkcs5->cipher = algo->cipher; - pkcs5->blocksize = algo->blocksize; - pkcs5->mode = algo->mode; - pkcs5->padopt = algo->padopt; - pkcs5->padchar = algo->padchar; - pkcs5->dklen = algo->keylen; - pkcs5->ivlen = algo->ivlen; - pkcs5->iv = param->beg; - return 0; -} - -static int -parse_rc2(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - asn1Element iv; - unsigned long effkeysize; - char *cp; - - if (!param || *param->header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - cp = getASN1Element(&iv, 
param->beg, param->end); - if (!cp) - return -1; - effkeysize = algo->effkeysize; - if (*iv.header == ASN1_INTEGER) { - if (asn1getword(&iv, &effkeysize) || effkeysize > 1024) - return -1; - - cp = getASN1Element(&iv, cp, param->end); - if (effkeysize < 256) - switch (effkeysize) { - case 160: - effkeysize = 40; - case 120: - effkeysize = 64; - case 58: - effkeysize = 128; - break; - default: - return -1; - } - } - if (effkeysize > 1024 || cp != param->end || - *iv.header != ASN1_OCTET_STRING || iv.end - iv.beg != algo->ivlen) - return -1; - pkcs5->cipher = algo->cipher; - pkcs5->blocksize = algo->blocksize; - pkcs5->mode = algo->mode; - pkcs5->padopt = algo->padopt; - pkcs5->padchar = algo->padchar; - pkcs5->ivlen = algo->ivlen; - pkcs5->iv = iv.beg; - pkcs5->effkeysize = effkeysize; - pkcs5->dklen = (effkeysize + 8 - 1) / 8; - return 0; -} - -static int -parse_pbes1(LIBSSH2_SESSION *session, pkcs5params *pkcs5, - pkcs5algo *algo, asn1Element *param) -{ - asn1Element salt; - asn1Element iterationCount; - unsigned long itercount; - char *cp; - - if (!param || *param->header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - - cp = getASN1Element(&salt, param->beg, param->end); - if (!cp || *salt.header != ASN1_OCTET_STRING || - salt.end - salt.beg != algo->saltlen) - return -1; - if (getASN1Element(&iterationCount, cp, param->end) != param->end || - *iterationCount.header != ASN1_INTEGER) - return -1; - if (asn1getword(&iterationCount, &itercount) || - !itercount || itercount > 100000) - return -1; - pkcs5->cipher = algo->cipher; - pkcs5->blocksize = algo->blocksize; - pkcs5->mode = algo->mode; - pkcs5->padopt = algo->padopt; - pkcs5->padchar = algo->padchar; - pkcs5->hash = algo->hash; - pkcs5->hashlen = algo->hashlen; - pkcs5->dklen = 16; - pkcs5->saltlen = algo->saltlen; - pkcs5->effkeysize = algo->effkeysize; - pkcs5->salt = salt.beg; - pkcs5->kdf = pbkdf1; - pkcs5->itercount = itercount; - return 0; -} - -static int -pkcs8kek(LIBSSH2_SESSION *session, 
_libssh2_os400qc3_crypto_ctx **ctx, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, asn1Element *privkeyinfo) -{ - asn1Element encprivkeyinfo; - asn1Element pkcs5alg; - pkcs5params pkcs5; - size_t pplen; - char *cp; - unsigned long t; - int i; - char *dk = NULL; - Qc3_Format_ALGD0200_T algd; - Qus_EC_t errcode; - - /* Determine if the PKCS#8 data is encrypted and, if so, set-up a - key encryption key and algorithm in context. - Return 1 if encrypted, 0, if not, -1 if error. */ - - *ctx = NULL; - privkeyinfo->beg = (char *) data; - privkeyinfo->end = privkeyinfo->beg + datalen; - - /* If no passphrase is given, it cannot be an encrypted key. */ - if (!passphrase || !*passphrase) - return 0; - - /* Parse PKCS#8 data, checking if ASN.1 format is PrivateKeyInfo or - EncryptedPrivateKeyInfo. */ - if (getASN1Element(&encprivkeyinfo, privkeyinfo->beg, privkeyinfo->end) != - (char *) data + datalen || - *encprivkeyinfo.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - cp = getASN1Element(&pkcs5alg, encprivkeyinfo.beg, encprivkeyinfo.end); - if (!cp) - return -1; - - switch (*pkcs5alg.header) { - case ASN1_INTEGER: /* Version. */ - return 0; /* This is a PrivateKeyInfo --> not encrypted. */ - case ASN1_SEQ | ASN1_CONSTRUCTED: /* AlgorithIdentifier. */ - break; /* This is an EncryptedPrivateKeyInfo --> encrypted. */ - default: - return -1; /* Unrecognized: error. */ - } - - /* Get the encrypted key data. */ - if (getASN1Element(privkeyinfo, cp, encprivkeyinfo.end) != - encprivkeyinfo.end || *privkeyinfo->header != ASN1_OCTET_STRING) - return -1; - - /* PKCS#5: parse the PBES AlgorithmIdentifier and recursively get all - encryption parameters. */ - memset((char *) &pkcs5, 0, sizeof pkcs5); - if (parse_pkcs5_algorithm(session, &pkcs5, &pkcs5alg, pbestable)) - return -1; - - /* Compute the derived key. */ - if ((*pkcs5.kdf)(session, &dk, passphrase, &pkcs5)) - return -1; - - /* Prepare the algorithm descriptor. 
*/ - memset((char *) &algd, 0, sizeof algd); - algd.Block_Cipher_Alg = pkcs5.cipher; - algd.Block_Length = pkcs5.blocksize; - algd.Mode = pkcs5.mode; - algd.Pad_Option = pkcs5.padopt; - algd.Pad_Character = pkcs5.padchar; - algd.Effective_Key_Size = pkcs5.effkeysize; - memcpy(algd.Init_Vector, pkcs5.iv, pkcs5.ivlen); - - /* Create the key and algorithm context tokens. */ - *ctx = libssh2_init_crypto_ctx(NULL); - if (!*ctx) { - LIBSSH2_FREE(session, dk); - return -1; - } - libssh2_init_crypto_ctx(*ctx); - set_EC_length(errcode, sizeof errcode); - Qc3CreateKeyContext(dk, &pkcs5.dklen, binstring, &algd.Block_Cipher_Alg, - qc3clear, NULL, NULL, (*ctx)->key.Key_Context_Token, - (char *) &errcode); - LIBSSH2_FREE(session, dk); - if (errcode.Bytes_Available) { - free((char *) *ctx); - *ctx = NULL; - return -1; - } - - Qc3CreateAlgorithmContext((char *) &algd, Qc3_Alg_Block_Cipher, - (*ctx)->hash.Alg_Context_Token, &errcode); - if (errcode.Bytes_Available) { - Qc3DestroyKeyContext((*ctx)->key.Key_Context_Token, (char *) &ecnull); - free((char *) *ctx); - *ctx = NULL; - return -1; - } - return 1; /* Tell it's encrypted. 
*/ -} - -static int -rsapkcs8privkey(LIBSSH2_SESSION *session, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, void *loadkeydata) -{ - libssh2_rsa_ctx *ctx = (libssh2_rsa_ctx *) loadkeydata; - char keyform = Qc3_Clear; - char *kek = NULL; - char *kea = NULL; - _libssh2_os400qc3_crypto_ctx *kekctx; - asn1Element pki; - int pkilen; - Qus_EC_t errcode; - - switch (pkcs8kek(session, &kekctx, data, datalen, passphrase, &pki)) { - case 1: - keyform = Qc3_Encrypted; - kek = kekctx->key.Key_Context_Token; - kea = kekctx->hash.Alg_Context_Token; - case 0: - break; - default: - return -1; - } - - set_EC_length(errcode, sizeof errcode); - pkilen = pki.end - pki.beg; - Qc3CreateKeyContext((unsigned char *) pki.beg, &pkilen, berstring, - rsaprivate, &keyform, kek, kea, - ctx->key.Key_Context_Token, (char *) &errcode); - if (errcode.Bytes_Available) { - if (kekctx) - _libssh2_os400qc3_crypto_dtor(kekctx); - return -1; - } - ctx->kek = kekctx; - return 0; -} - -static char * -storewithlength(char *p, const char *data, int length) -{ - _libssh2_htonu32(p, length); - if (length) - memcpy(p + 4, data, length); - return p + 4 + length; -} - -static int -sshrsapubkey(LIBSSH2_SESSION *session, char **sshpubkey, - asn1Element *params, asn1Element *key, const char *method) -{ - int methlen = strlen(method); - asn1Element keyseq; - asn1Element m; - asn1Element e; - int len; - char *cp; - - if (getASN1Element(&keyseq, key->beg + 1, key->end) != key->end || - *keyseq.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - if (!getASN1Element(&m, keyseq.beg, keyseq.end) || - *m.header != ASN1_INTEGER) - return -1; - if (getASN1Element(&e, m.end, keyseq.end) != keyseq.end || - *e.header != ASN1_INTEGER) - return -1; - len = 4 + methlen + 4 + (e.end - e.beg) + 4 + (m.end - m.beg); - cp = LIBSSH2_ALLOC(session, len); - if (!cp) - return -1; - *sshpubkey = cp; - cp = storewithlength(cp, method, methlen); - cp = storewithlength(cp, e.beg, e.end - e.beg); - cp 
= storewithlength(cp, m.beg, m.end - m.beg); - return len; -} - -static int -rsapkcs8pubkey(LIBSSH2_SESSION *session, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, void *loadkeydata) -{ - loadpubkeydata *p = (loadpubkeydata *) loadkeydata; - char *buf; - int len; - char *cp; - int i; - char keyform = Qc3_Clear; - char *kek = NULL; - char *kea = NULL; - _libssh2_os400qc3_crypto_ctx *kekctx; - asn1Element subjpubkeyinfo; - asn1Element algorithmid; - asn1Element algorithm; - asn1Element subjpubkey; - asn1Element parameters; - asn1Element pki; - int pkilen; - Qus_EC_t errcode; - - if (!(buf = alloca(datalen))) - return -1; - - switch (pkcs8kek(session, &kekctx, data, datalen, passphrase, &pki)) { - case 1: - keyform = Qc3_Encrypted; - kek = kekctx->key.Key_Context_Token; - kea = kekctx->hash.Alg_Context_Token; - case 0: - break; - default: - return -1; - } - - set_EC_length(errcode, sizeof errcode); - pkilen = pki.end - pki.beg; - Qc3ExtractPublicKey(pki.beg, &pkilen, berstring, &keyform, - kek, kea, buf, (int *) &datalen, &len, &errcode); - _libssh2_os400qc3_crypto_dtor(kekctx); - if (errcode.Bytes_Available) - return -1; - /* Get the algorithm OID and key data from SubjectPublicKeyInfo. */ - if (getASN1Element(&subjpubkeyinfo, buf, buf + len) != buf + len || - *subjpubkeyinfo.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - cp = getASN1Element(&algorithmid, subjpubkeyinfo.beg, subjpubkeyinfo.end); - if (!cp || *algorithmid.header != (ASN1_SEQ | ASN1_CONSTRUCTED)) - return -1; - if (!getASN1Element(&algorithm, algorithmid.beg, algorithmid.end) || - *algorithm.header != ASN1_OBJ_ID) - return -1; - if (getASN1Element(&subjpubkey, cp, subjpubkeyinfo.end) != - subjpubkeyinfo.end || *subjpubkey.header != ASN1_BIT_STRING) - return -1; - /* Check for supported algorithm. 
*/ - for (i = 0; pka[i].oid; i++) - if (!oidcmp(&algorithm, pka[i].oid)) { - len = (*pka[i].sshpubkey)(session, &p->data, &algorithmid, - &subjpubkey, pka[i].method); - if (len < 0) - return -1; - p->length = len; - p->method = pka[i].method; - return 0; - } - return -1; /* Algorithm not supported. */ -} - -static int -pkcs1topkcs8(LIBSSH2_SESSION *session, - const unsigned char **data8, unsigned int *datalen8, - const unsigned char *data1, unsigned int datalen1) -{ - asn1Element *prvk; - asn1Element *pkcs8; - unsigned char *data; - - *data8 = NULL; - *datalen8 = 0; - if (datalen1 < 2) - return -1; - prvk = asn1_new_from_bytes(data1, datalen1); - if (!prvk) - return -1; - pkcs8 = rsaprivatekeyinfo(prvk); - asn1delete(prvk); - if (!prvk) { - asn1delete(pkcs8); - pkcs8 = NULL; - } - if (!pkcs8) - return -1; - data = (unsigned char *) LIBSSH2_ALLOC(session, pkcs8->end - pkcs8->header); - if (!data) { - asn1delete(pkcs8); - return -1; - } - *data8 = data; - *datalen8 = pkcs8->end - pkcs8->header; - memcpy((char *) data, (char *) pkcs8->header, *datalen8); - asn1delete(pkcs8); - return 0; -} - -static int -rsapkcs1privkey(LIBSSH2_SESSION *session, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, void *loadkeydata) -{ - const unsigned char *data8; - unsigned int datalen8; - int ret; - - if (pkcs1topkcs8(session, &data8, &datalen8, data, datalen)) - return -1; - ret = rsapkcs8privkey(session, data8, datalen8, passphrase, loadkeydata); - LIBSSH2_FREE(session, (char *) data8); - return ret; -} - -static int -rsapkcs1pubkey(LIBSSH2_SESSION *session, - const unsigned char *data, unsigned int datalen, - const unsigned char *passphrase, void *loadkeydata) -{ - const unsigned char *data8; - unsigned int datalen8; - int ret; - - if (pkcs1topkcs8(session, &data8, &datalen8, data, datalen)) - return -1; - ret = rsapkcs8pubkey(session, data8, datalen8, passphrase, loadkeydata); - LIBSSH2_FREE(session, (char *) data8); - return ret; -} - -static 
int -try_pem_load(LIBSSH2_SESSION *session, FILE *fp, - const unsigned char *passphrase, - const char *header, const char *trailer, - loadkeyproc proc, void *loadkeydata) -{ - unsigned char *data = NULL; - unsigned int datalen = 0; - int c; - int ret; - - fseek(fp, 0L, SEEK_SET); - for (;;) { - ret = _libssh2_pem_parse(session, header, trailer, - fp, &data, &datalen); - - if (!ret) { - ret = (*proc)(session, data, datalen, passphrase, loadkeydata); - if (!ret) - return 0; - } - - if (data) { - LIBSSH2_FREE(session, data); - data = NULL; - } - c = getc(fp); - - if (c == EOF) - break; - - ungetc(c, fp); - } - - return -1; -} - -static int -load_rsa_private_file(LIBSSH2_SESSION *session, const char *filename, - unsigned const char *passphrase, - loadkeyproc proc1, loadkeyproc proc8, void *loadkeydata) -{ - FILE *fp = fopen(filename, fopenrmode); - unsigned char *data = NULL; - size_t datalen = 0; - int ret; - long filesize; - - if (!fp) - return -1; - - /* Try with "ENCRYPTED PRIVATE KEY" PEM armor. - --> PKCS#8 EncryptedPrivateKeyInfo */ - ret = try_pem_load(session, fp, passphrase, beginencprivkeyhdr, - endencprivkeyhdr, proc8, loadkeydata); - - /* Try with "PRIVATE KEY" PEM armor. - --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - if (ret) - ret = try_pem_load(session, fp, passphrase, beginprivkeyhdr, - endprivkeyhdr, proc8, loadkeydata); - - /* Try with "RSA PRIVATE KEY" PEM armor. - --> PKCS#1 RSAPrivateKey */ - if (ret) - ret = try_pem_load(session, fp, passphrase, beginrsaprivkeyhdr, - endrsaprivkeyhdr, proc1, loadkeydata); - fclose(fp); - - if (ret) { - /* Try DER encoding. */ - fp = fopen(filename, fopenrbmode); - fseek(fp, 0L, SEEK_END); - filesize = ftell(fp); - - if (filesize <= 32768) { /* Limit to a reasonable size. */ - datalen = filesize; - data = (unsigned char *) alloca(datalen); - if (data) { - fseek(fp, 0L, SEEK_SET); - fread(data, datalen, 1, fp); - - /* Try as PKCS#8 DER data. 
- --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - ret = (*proc8)(session, data, datalen, passphrase, - loadkeydata); - - /* Try as PKCS#1 DER data. - --> PKCS#1 RSAPrivateKey */ - if (ret) - ret = (*proc1)(session, data, datalen, passphrase, - loadkeydata); - } - } - fclose(fp); - } - - return ret; -} - -int -_libssh2_rsa_new_private(libssh2_rsa_ctx **rsa, LIBSSH2_SESSION *session, - const char *filename, unsigned const char *passphrase) -{ - libssh2_rsa_ctx *ctx = libssh2_init_crypto_ctx(NULL); - int ret; - Qc3_Format_ALGD0400_T algd; - Qus_EC_t errcode; - - if (!ctx) - return -1; - ret = load_rsa_private_file(session, filename, passphrase, - rsapkcs1privkey, rsapkcs8privkey, (void *) ctx); - if (!ret) { - /* Create the algorithm context. */ - algd.Public_Key_Alg = Qc3_RSA; - algd.PKA_Block_Format = Qc3_PKCS1_01; - memset(algd.Reserved, 0, sizeof algd.Reserved); - algd.Signing_Hash_Alg = Qc3_SHA1; - set_EC_length(errcode, sizeof errcode); - Qc3CreateAlgorithmContext((char *) &algd, Qc3_Alg_Public_Key, - ctx->hash.Alg_Context_Token, &errcode); - if (errcode.Bytes_Available) - ret = -1; - } - if (ret) { - _libssh2_os400qc3_crypto_dtor(ctx); - ctx = NULL; - } - *rsa = ctx; - return ret; -} - -int -_libssh2_pub_priv_keyfile(LIBSSH2_SESSION *session, - unsigned char **method, size_t *method_len, - unsigned char **pubkeydata, size_t *pubkeydata_len, - const char *privatekey, const char *passphrase) - -{ - loadpubkeydata p; - int ret; - - *method = NULL; - *method_len = 0; - *pubkeydata = NULL; - *pubkeydata_len = 0; - - ret = load_rsa_private_file(session, privatekey, passphrase, - rsapkcs1pubkey, rsapkcs8pubkey, (void *) &p); - if (!ret) { - *method_len = strlen(p.method); - if ((*method = LIBSSH2_ALLOC(session, *method_len))) - memcpy((char *) *method, p.method, *method_len); - else - ret = -1; - } - - if (ret) { - if (*method) - LIBSSH2_FREE(session, *method); - if (p.data) - LIBSSH2_FREE(session, (void *) p.data); - *method = NULL; - *method_len = 0; - } 
else { - *pubkeydata = (unsigned char *) p.data; - *pubkeydata_len = p.length; - } - - return ret; -} - -int -_libssh2_rsa_new_private_frommemory(libssh2_rsa_ctx **rsa, - LIBSSH2_SESSION *session, - const char *filedata, - size_t filedata_len, - unsigned const char *passphrase) -{ - libssh2_rsa_ctx *ctx = libssh2_init_crypto_ctx(NULL); - unsigned char *data = NULL; - unsigned int datalen = 0; - int ret; - Qc3_Format_ALGD0400_T algd; - Qus_EC_t errcode; - - if (!ctx) - return -1; - - /* Try with "ENCRYPTED PRIVATE KEY" PEM armor. - --> PKCS#8 EncryptedPrivateKeyInfo */ - ret = _libssh2_pem_parse_memory(session, - beginencprivkeyhdr, endencprivkeyhdr, - filedata, filedata_len, &data, &datalen); - - /* Try with "PRIVATE KEY" PEM armor. - --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - if (ret) - ret = _libssh2_pem_parse_memory(session, - beginprivkeyhdr, endprivkeyhdr, - filedata, filedata_len, - &data, &datalen); - - if (!ret) { - /* Process PKCS#8. */ - ret = rsapkcs8privkey(session, - data, datalen, passphrase, (void *) &ctx); - } else { - /* Try with "RSA PRIVATE KEY" PEM armor. - --> PKCS#1 RSAPrivateKey */ - ret = _libssh2_pem_parse_memory(session, - beginrsaprivkeyhdr, endrsaprivkeyhdr, - filedata, filedata_len, - &data, &datalen); - if (!ret) - ret = rsapkcs1privkey(session, - data, datalen, passphrase, (void *) &ctx); - } - - if (ret) { - /* Try as PKCS#8 DER data. - --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - ret = rsapkcs8privkey(session, filedata, filedata_len, - passphrase, (void *) &ctx); - - /* Try as PKCS#1 DER data. - --> PKCS#1 RSAPrivateKey */ - if (ret) - ret = rsapkcs1privkey(session, filedata, filedata_len, - passphrase, (void *) &ctx); - } - - if (data) - LIBSSH2_FREE(session, data); - - if (!ret) { - /* Create the algorithm context. 
*/ - algd.Public_Key_Alg = Qc3_RSA; - algd.PKA_Block_Format = Qc3_PKCS1_01; - memset(algd.Reserved, 0, sizeof algd.Reserved); - algd.Signing_Hash_Alg = Qc3_SHA1; - set_EC_length(errcode, sizeof errcode); - Qc3CreateAlgorithmContext((char *) &algd, Qc3_Alg_Public_Key, - ctx->hash.Alg_Context_Token, &errcode); - if (errcode.Bytes_Available) - ret = -1; - } - - if (ret) { - _libssh2_os400qc3_crypto_dtor(ctx); - ctx = NULL; - } - - *rsa = ctx; - return ret; -} - -int -_libssh2_pub_priv_keyfilememory(LIBSSH2_SESSION *session, - unsigned char **method, size_t *method_len, - unsigned char **pubkeydata, - size_t *pubkeydata_len, - const char *privatekeydata, - size_t privatekeydata_len, - const char *passphrase) -{ - loadpubkeydata p; - unsigned char *data = NULL; - unsigned int datalen = 0; - const char *meth; - int ret; - - *method = NULL; - *method_len = 0; - *pubkeydata = NULL; - *pubkeydata_len = 0; - - /* Try with "ENCRYPTED PRIVATE KEY" PEM armor. - --> PKCS#8 EncryptedPrivateKeyInfo */ - ret = _libssh2_pem_parse_memory(session, - beginencprivkeyhdr, endencprivkeyhdr, - privatekeydata, privatekeydata_len, - &data, &datalen); - - /* Try with "PRIVATE KEY" PEM armor. - --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - if (ret) - ret = _libssh2_pem_parse_memory(session, - beginprivkeyhdr, endprivkeyhdr, - privatekeydata, privatekeydata_len, - &data, &datalen); - - if (!ret) { - /* Process PKCS#8. */ - ret = rsapkcs8pubkey(session, - data, datalen, passphrase, (void *) &p); - } else { - /* Try with "RSA PRIVATE KEY" PEM armor. - --> PKCS#1 RSAPrivateKey */ - ret = _libssh2_pem_parse_memory(session, - beginrsaprivkeyhdr, endrsaprivkeyhdr, - privatekeydata, privatekeydata_len, - &data, &datalen); - if (!ret) - ret = rsapkcs1pubkey(session, - data, datalen, passphrase, (void *) &p); - } - - if (ret) { - /* Try as PKCS#8 DER data. 
- --> PKCS#8 PrivateKeyInfo or EncryptedPrivateKeyInfo */ - ret = rsapkcs8pubkey(session, privatekeydata, privatekeydata_len, - passphrase, (void *) &p); - - /* Try as PKCS#1 DER data. - --> PKCS#1 RSAPrivateKey */ - if (ret) - ret = rsapkcs1pubkey(session, privatekeydata, privatekeydata_len, - passphrase, (void *) &p); - } - - if (data) - LIBSSH2_FREE(session, data); - - if (!ret) { - *method_len = strlen(p.method); - if ((*method = LIBSSH2_ALLOC(session, *method_len))) - memcpy((char *) *method, p.method, *method_len); - else - ret = -1; - } - if (ret) { - if (*method) - LIBSSH2_FREE(session, *method); - if (p.data) - LIBSSH2_FREE(session, (void *) p.data); - *method = NULL; - *method_len = 0; - } else { - *pubkeydata = (unsigned char *) p.data; - *pubkeydata_len = p.length; - } - - return ret; -} - -int -_libssh2_rsa_sha1_verify(libssh2_rsa_ctx *rsa, - const unsigned char *sig, unsigned long sig_len, - const unsigned char *m, unsigned long m_len) -{ - Qus_EC_t errcode; - int slen = sig_len; - int mlen = m_len; - - set_EC_length(errcode, sizeof errcode); - Qc3VerifySignature((char *) sig, &slen, (char *) m, &mlen, Qc3_Data, - rsa->hash.Alg_Context_Token, Qc3_Alg_Token, - rsa->key.Key_Context_Token, Qc3_Key_Token, anycsp, - NULL, (char *) &errcode); - return errcode.Bytes_Available? 
-1: 0; -} - -int -_libssh2_os400qc3_rsa_sha1_signv(LIBSSH2_SESSION *session, - unsigned char **signature, - size_t *signature_len, - int veccount, - const struct iovec vector[], - libssh2_rsa_ctx *ctx) -{ - Qus_EC_t errcode; - int siglen; - unsigned char *sig; - char sigbuf[8192]; - int sigbufsize = sizeof sigbuf; - - ctx->hash.Final_Op_Flag = Qc3_Final; - set_EC_length(errcode, sizeof errcode); - Qc3CalculateSignature((char *) vector, &veccount, Qc3_Array, - (char *) &ctx->hash, Qc3_Alg_Token, - (char *) &ctx->key, Qc3_Key_Token, - anycsp, NULL, sigbuf, &sigbufsize, &siglen, - (char *) &errcode); - ctx->hash.Final_Op_Flag = Qc3_Continue; - if (errcode.Bytes_Available) - return -1; - sig = LIBSSH2_ALLOC(session, siglen); - if (!sig) - return -1; - memcpy((char *) sig, sigbuf, siglen); - *signature = sig; - *signature_len = siglen; - return 0; -} - -void -_libssh2_init_aes_ctr(void) -{ -} - -#endif /* LIBSSH2_OS400QC3 */ - -/* vim: set expandtab ts=4 sw=4: */ diff --git a/vendor/libssh2/src/os400qc3.h b/vendor/libssh2/src/os400qc3.h deleted file mode 100644 index dbaa581f60..0000000000 --- a/vendor/libssh2/src/os400qc3.h +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Copyright (C) 2015 Patrick Monnerat, D+H - * All rights reserved. - * - * Redistribution and use in source and binary forms, - * with or without modification, are permitted provided - * that the following conditions are met: - * - * Redistributions of source code must retain the above - * copyright notice, this list of conditions and the - * following disclaimer. - * - * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * Neither the name of the copyright holder nor the names - * of any other contributors may be used to endorse or - * promote products derived from this software without - * specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. - */ - -#ifndef LIBSSH2_OS400QC3_H -#define LIBSSH2_OS400QC3_H - -#include -#include - -#include - - -/* Redefine character/string literals as always EBCDIC. */ -#undef Qc3_Alg_Token -#define Qc3_Alg_Token "\xC1\xD3\xC7\xC4\xF0\xF1\xF0\xF0" /* ALGD0100 */ -#undef Qc3_Alg_Block_Cipher -#define Qc3_Alg_Block_Cipher "\xC1\xD3\xC7\xC4\xF0\xF2\xF0\xF0" /* ALGD0200 */ -#undef Qc3_Alg_Block_CipherAuth -#define Qc3_Alg_Block_CipherAuth \ - "\xC1\xD3\xC7\xC4\xF0\xF2\xF1\xF0" /* ALGD0210 */ -#undef Qc3_Alg_Stream_Cipher -#define Qc3_Alg_Stream_Cipher \ - "\xC1\xD3\xC7\xC4\xF0\xF3\xF0\xF0" /* ALGD0300 */ -#undef Qc3_Alg_Public_Key -#define Qc3_Alg_Public_Key "\xC1\xD3\xC7\xC4\xF0\xF4\xF0\xF0" /* ALGD0400 */ -#undef Qc3_Alg_Hash -#define Qc3_Alg_Hash "\xC1\xD3\xC7\xC4\xF0\xF5\xF0\xF0" /* ALGD0500 */ -#undef Qc3_Data -#define Qc3_Data "\xC4\xC1\xE3\xC1\xF0\xF1\xF0\xF0" /* DATA0100 */ -#undef Qc3_Array -#define Qc3_Array "\xC4\xC1\xE3\xC1\xF0\xF2\xF0\xF0" /* DATA0200 */ -#undef Qc3_Key_Token -#define Qc3_Key_Token "\xD2\xC5\xE8\xC4\xF0\xF1\xF0\xF0" /* KEYD0100 */ -#undef Qc3_Key_Parms -#define Qc3_Key_Parms "\xD2\xC5\xE8\xC4\xF0\xF2\xF0\xF0" /* KEYD0200 */ -#undef Qc3_Key_KSLabel 
-#define Qc3_Key_KSLabel "\xD2\xC5\xE8\xC4\xF0\xF4\xF0\xF0" /* KEYD0400 */ -#undef Qc3_Key_PKCS5 -#define Qc3_Key_PKCS5 "\xD2\xC5\xE8\xC4\xF0\xF5\xF0\xF0" /* KEYD0500 */ -#undef Qc3_Key_PEMCert -#define Qc3_Key_PEMCert "\xD2\xC5\xE8\xC4\xF0\xF6\xF0\xF0" /* KEYD0600 */ -#undef Qc3_Key_CSLabel -#define Qc3_Key_CSLabel "\xD2\xC5\xE8\xC4\xF0\xF7\xF0\xF0" /* KEYD0700 */ -#undef Qc3_Key_CSDN -#define Qc3_Key_CSDN "\xD2\xC5\xE8\xC4\xF0\xF8\xF0\xF0" /* KEYD0800 */ -#undef Qc3_Key_AppID -#define Qc3_Key_AppID "\xD2\xC5\xE8\xC4\xF0\xF9\xF0\xF0" /* KEYD0900 */ - -#undef Qc3_ECB -#define Qc3_ECB '\xF0' /* '0' */ -#undef Qc3_CBC -#define Qc3_CBC '\xF1' /* '1' */ -#undef Qc3_OFB -#define Qc3_OFB '\xF2' /* '2' */ -#undef Qc3_CFB1Bit -#define Qc3_CFB1Bit '\xF3' /* '3' */ -#undef Qc3_CFB8Bit -#define Qc3_CFB8Bit '\xF4' /* '4' */ -#undef Qc3_CFB64Bit -#define Qc3_CFB64Bit '\xF5' /* '5' */ -#undef Qc3_CUSP -#define Qc3_CUSP '\xF6' /* '6' */ -#undef Qc3_CTR -#define Qc3_CTR '\xF7' /* '7' */ -#undef Qc3_CCM -#define Qc3_CCM '\xF8' /* '8' */ -#undef Qc3_No_Pad -#define Qc3_No_Pad '\xF0' /* '0' */ -#undef Qc3_Pad_Char -#define Qc3_Pad_Char '\xF1' /* '1' */ -#undef Qc3_Pad_Counter -#define Qc3_Pad_Counter '\xF2' /* '2' */ -#undef Qc3_PKCS1_00 -#define Qc3_PKCS1_00 '\xF0' /* '0' */ -#undef Qc3_PKCS1_01 -#define Qc3_PKCS1_01 '\xF1' /* '1' */ -#undef Qc3_PKCS1_02 -#define Qc3_PKCS1_02 '\xF2' /* '2' */ -#undef Qc3_ISO9796 -#define Qc3_ISO9796 '\xF3' /* '3' */ -#undef Qc3_Zero_Pad -#define Qc3_Zero_Pad '\xF4' /* '4' */ -#undef Qc3_ANSI_X931 -#define Qc3_ANSI_X931 '\xF5' /* '5' */ -#undef Qc3_OAEP -#define Qc3_OAEP '\xF6' /* '6' */ -#undef Qc3_Bin_String -#define Qc3_Bin_String '\xF0' /* '0' */ -#undef Qc3_BER_String -#define Qc3_BER_String '\xF1' /* '1' */ -#undef Qc3_MK_Struct -#define Qc3_MK_Struct '\xF3' /* '3' */ -#undef Qc3_KSLabel_Struct -#define Qc3_KSLabel_Struct '\xF4' /* '4' */ -#undef Qc3_PKCS5_Struct -#define Qc3_PKCS5_Struct '\xF5' /* '5' */ -#undef Qc3_PEMCert_String -#define 
Qc3_PEMCert_String '\xF6' /* '6' */ -#undef Qc3_CSLabel_String -#define Qc3_CSLabel_String '\xF7' /* '7' */ -#undef Qc3_CSDN_String -#define Qc3_CSDN_String '\xF8' /* '8' */ -#undef Qc3_Clear -#define Qc3_Clear '\xF0' /* '0' */ -#undef Qc3_Encrypted -#define Qc3_Encrypted '\xF1' /* '1' */ -#undef Qc3_MK_Encrypted -#define Qc3_MK_Encrypted '\xF2' /* '2' */ -#undef Qc3_Any_CSP -#define Qc3_Any_CSP '\xF0' /* '0' */ -#undef Qc3_Sfw_CSP -#define Qc3_Sfw_CSP '\xF1' /* '1' */ -#undef Qc3_Hdw_CSP -#define Qc3_Hdw_CSP '\xF2' /* '2' */ -#undef Qc3_Continue -#define Qc3_Continue '\xF0' /* '0' */ -#undef Qc3_Final -#define Qc3_Final '\xF1' /* '1' */ -#undef Qc3_MK_New -#define Qc3_MK_New '\xF0' /* '0' */ -#undef Qc3_MK_Current -#define Qc3_MK_Current '\xF1' /* '1' */ -#undef Qc3_MK_Old -#define Qc3_MK_Old '\xF2' /* '2' */ -#undef Qc3_MK_Pending -#define Qc3_MK_Pending '\xF3' /* '3' */ - - -/* Define which features are supported. */ -#define LIBSSH2_MD5 1 -#define LIBSSH2_HMAC_RIPEMD 0 -#define LIBSSH2_HMAC_SHA256 1 -#define LIBSSH2_HMAC_SHA512 1 - -#define LIBSSH2_AES 1 -#define LIBSSH2_AES_CTR 1 -#define LIBSSH2_BLOWFISH 0 -#define LIBSSH2_RC4 1 -#define LIBSSH2_CAST 0 -#define LIBSSH2_3DES 1 - -#define LIBSSH2_RSA 1 -#define LIBSSH2_DSA 0 - -#define MD5_DIGEST_LENGTH 16 -#define SHA_DIGEST_LENGTH 20 -#define SHA256_DIGEST_LENGTH 32 -#define SHA512_DIGEST_LENGTH 64 - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: global handles structures. - * - *******************************************************************/ - -/* HMAC & private key algorithms support structure. */ -typedef struct _libssh2_os400qc3_crypto_ctx _libssh2_os400qc3_crypto_ctx; -struct _libssh2_os400qc3_crypto_ctx { - Qc3_Format_ALGD0100_T hash; /* Hash algorithm. */ - Qc3_Format_KEYD0100_T key; /* Key. */ - _libssh2_os400qc3_crypto_ctx * kek; /* Key encryption. */ -}; - -typedef struct { /* Big number. 
*/ - unsigned char * bignum; /* Number bits, little-endian. */ - unsigned int length; /* Length of bignum (# bytes). */ -} _libssh2_bn; - -typedef struct { /* Algorithm description. */ - char * fmt; /* Format of Qc3 structure. */ - int algo; /* Algorithm identifier. */ - unsigned char size; /* Block length. */ - unsigned char mode; /* Block mode. */ - int keylen; /* Key length. */ -} _libssh2_os400qc3_cipher_t; - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: Define global types/codes. - * - *******************************************************************/ - -#define libssh2_crypto_init() -#define libssh2_crypto_exit() - -#define libssh2_sha1_ctx Qc3_Format_ALGD0100_T -#define libssh2_sha256_ctx Qc3_Format_ALGD0100_T -#define libssh2_md5_ctx Qc3_Format_ALGD0100_T -#define libssh2_hmac_ctx _libssh2_os400qc3_crypto_ctx -#define _libssh2_cipher_ctx _libssh2_os400qc3_crypto_ctx - -#define libssh2_sha1_init(x) libssh2_os400qc3_hash_init(x, Qc3_SHA1) -#define libssh2_sha1_update(ctx, data, len) \ - libssh2_os400qc3_hash_update(&(ctx), data, len) -#define libssh2_sha1_final(ctx, out) \ - libssh2_os400qc3_hash_final(&(ctx), out) -#define libssh2_sha256_init(x) libssh2_os400qc3_hash_init(x, Qc3_SHA256) -#define libssh2_sha256_update(ctx, data, len) \ - libssh2_os400qc3_hash_update(&(ctx), data, len) -#define libssh2_sha256_final(ctx, out) \ - libssh2_os400qc3_hash_final(&(ctx), out) -#define libssh2_sha256(message, len, out) \ - libssh2_os400qc3_hash(message, len, out, \ - Qc3_SHA256) -#define libssh2_md5_init(x) libssh2_os400qc3_hash_init(x, Qc3_MD5) -#define libssh2_md5_update(ctx, data, len) \ - libssh2_os400qc3_hash_update(&(ctx), data, len) -#define libssh2_md5_final(ctx, out) \ - libssh2_os400qc3_hash_final(&(ctx), out) -#define libssh2_hmac_ctx_init(ctx) \ - memset((char *) &(ctx), 0, \ - sizeof(libssh2_hmac_ctx)) -#define libssh2_hmac_md5_init(ctx, key, keylen) \ - libssh2_os400qc3_hmac_init(ctx, 
Qc3_MD5, \ - MD5_DIGEST_LENGTH, \ - key, keylen) -#define libssh2_hmac_sha1_init(ctx, key, keylen) \ - libssh2_os400qc3_hmac_init(ctx, Qc3_SHA1, \ - SHA_DIGEST_LENGTH, \ - key, keylen) -#define libssh2_hmac_sha256_init(ctx, key, keylen) \ - libssh2_os400qc3_hmac_init(ctx, Qc3_SHA256, \ - SHA256_DIGEST_LENGTH, \ - key, keylen) -#define libssh2_hmac_sha512_init(ctx, key, keylen) \ - libssh2_os400qc3_hmac_init(ctx, Qc3_SHA512, \ - SHA512_DIGEST_LENGTH, \ - key, keylen) -#define libssh2_hmac_update(ctx, data, datalen) \ - libssh2_os400qc3_hmac_update(&(ctx), \ - data, datalen) -#define libssh2_hmac_final(ctx, data) \ - libssh2_os400qc3_hmac_final(&(ctx), data) -#define libssh2_hmac_cleanup(ctx) \ - _libssh2_os400qc3_crypto_dtor(ctx) - - -#define _libssh2_bn_ctx int /* Not used. */ - -#define _libssh2_bn_ctx_new() 0 -#define _libssh2_bn_ctx_free(bnctx) ((void) 0) - -#define _libssh2_bn_init_from_bin() _libssh2_bn_init() -#define _libssh2_bn_mod_exp(r, a, p, m, ctx) \ - _libssh2_os400qc3_bn_mod_exp(r, a, p, m) -#define _libssh2_bn_bytes(bn) ((bn)->length) - -#define _libssh2_cipher_type(name) _libssh2_os400qc3_cipher_t name -#define _libssh2_cipher_aes128 {Qc3_Alg_Block_Cipher, Qc3_AES, 16, \ - Qc3_CBC, 16} -#define _libssh2_cipher_aes192 {Qc3_Alg_Block_Cipher, Qc3_AES, 24, \ - Qc3_CBC, 24} -#define _libssh2_cipher_aes256 {Qc3_Alg_Block_Cipher, Qc3_AES, 32, \ - Qc3_CBC, 32} -#define _libssh2_cipher_aes128ctr {Qc3_Alg_Block_Cipher, Qc3_AES, 16, \ - Qc3_CTR, 16} -#define _libssh2_cipher_aes192ctr {Qc3_Alg_Block_Cipher, Qc3_AES, 24, \ - Qc3_CTR, 24} -#define _libssh2_cipher_aes256ctr {Qc3_Alg_Block_Cipher, Qc3_AES, 32, \ - Qc3_CTR, 32} -#define _libssh2_cipher_3des {Qc3_Alg_Block_Cipher, Qc3_TDES, 0, \ - Qc3_CBC, 24} -#define _libssh2_cipher_arcfour {Qc3_Alg_Stream_Cipher, Qc3_RC4, 0, 0, 16} - -#define _libssh2_cipher_dtor(ctx) _libssh2_os400qc3_crypto_dtor(ctx) - -#define libssh2_rsa_ctx _libssh2_os400qc3_crypto_ctx -#define _libssh2_rsa_free(ctx) 
(_libssh2_os400qc3_crypto_dtor(ctx), \ - free((char *) ctx)) -#define libssh2_prepare_iovec(vec, len) memset((char *) (vec), 0, \ - (len) * sizeof(struct iovec)) -#define _libssh2_rsa_sha1_signv(session, sig, siglen, count, vector, ctx) \ - _libssh2_os400qc3_rsa_sha1_signv(session, sig, siglen, \ - count, vector, ctx) - - -/******************************************************************* - * - * OS/400 QC3 crypto-library backend: Support procedure prototypes. - * - *******************************************************************/ - -extern _libssh2_bn * _libssh2_bn_init(void); -extern void _libssh2_bn_free(_libssh2_bn *bn); -extern unsigned long _libssh2_bn_bits(_libssh2_bn *bn); -extern int _libssh2_bn_from_bin(_libssh2_bn *bn, int len, - const unsigned char *v); -extern int _libssh2_bn_set_word(_libssh2_bn *bn, unsigned long val); -extern int _libssh2_bn_to_bin(_libssh2_bn *bn, unsigned char *val); -extern void _libssh2_random(unsigned char *buf, int len); -extern int _libssh2_bn_rand(_libssh2_bn *bn, int bits, - int top, int bottom); -extern int _libssh2_os400qc3_bn_mod_exp(_libssh2_bn *r, _libssh2_bn *a, - _libssh2_bn *p, _libssh2_bn *m); -extern void _libssh2_os400qc3_crypto_dtor(_libssh2_os400qc3_crypto_ctx *x); -extern int libssh2_os400qc3_hash_init(Qc3_Format_ALGD0100_T *x, - unsigned int algo); -extern void libssh2_os400qc3_hash_update(Qc3_Format_ALGD0100_T *ctx, - unsigned char *data, int len); -extern void libssh2_os400qc3_hash_final(Qc3_Format_ALGD0100_T *ctx, - unsigned char *out); -extern int libssh2_os400qc3_hash(const unsigned char *message, - unsigned long len, unsigned char *out, - unsigned int algo); -extern void libssh2_os400qc3_hmac_init(_libssh2_os400qc3_crypto_ctx *x, - int algo, size_t minkeylen, - void *key, int keylen); -extern void libssh2_os400qc3_hmac_update(_libssh2_os400qc3_crypto_ctx *ctx, - const unsigned char *data, - int len); -extern void libssh2_os400qc3_hmac_final(_libssh2_os400qc3_crypto_ctx *ctx, - unsigned char *out); 
-extern int _libssh2_os400qc3_rsa_sha1_signv(LIBSSH2_SESSION *session, - unsigned char **signature, - size_t *signature_len, - int veccount, - const struct iovec vector[], - libssh2_rsa_ctx *ctx); - -#endif - -/* vim: set expandtab ts=4 sw=4: */ diff --git a/vendor/libssh2/src/packet.c b/vendor/libssh2/src/packet.c index 5f1feb8c6b..c3756a8ea0 100644 --- a/vendor/libssh2/src/packet.c +++ b/vendor/libssh2/src/packet.c @@ -85,30 +85,53 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, char failure_code = SSH_OPEN_ADMINISTRATIVELY_PROHIBITED; int rc; - (void) datalen; - - if (listen_state->state == libssh2_NB_state_idle) { - unsigned char *s = data + (sizeof("forwarded-tcpip") - 1) + 5; - listen_state->sender_channel = _libssh2_ntohu32(s); - s += 4; - - listen_state->initial_window_size = _libssh2_ntohu32(s); - s += 4; - listen_state->packet_size = _libssh2_ntohu32(s); - s += 4; - - listen_state->host_len = _libssh2_ntohu32(s); - s += 4; - listen_state->host = s; - s += listen_state->host_len; - listen_state->port = _libssh2_ntohu32(s); - s += 4; - - listen_state->shost_len = _libssh2_ntohu32(s); - s += 4; - listen_state->shost = s; - s += listen_state->shost_len; - listen_state->sport = _libssh2_ntohu32(s); + if(listen_state->state == libssh2_NB_state_idle) { + unsigned long offset = (sizeof("forwarded-tcpip") - 1) + 5; + size_t temp_len = 0; + struct string_buf buf; + buf.data = data; + buf.dataptr = buf.data; + buf.len = datalen; + + if(datalen < offset) { + return _libssh2_error(session, LIBSSH2_ERROR_OUT_OF_BOUNDARY, + "Unexpected packet size"); + } + + buf.dataptr += offset; + + if(_libssh2_get_u32(&buf, &(listen_state->sender_channel))) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting channel"); + } + if(_libssh2_get_u32(&buf, &(listen_state->initial_window_size))) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting window size"); + } + 
if(_libssh2_get_u32(&buf, &(listen_state->packet_size))) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting packet"); + } + if(_libssh2_get_string(&buf, &(listen_state->host), &temp_len)) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting host"); + } + listen_state->host_len = (uint32_t)temp_len; + + if(_libssh2_get_u32(&buf, &(listen_state->port))) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting port"); + } + if(_libssh2_get_string(&buf, &(listen_state->shost), &temp_len)) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting shost"); + } + listen_state->shost_len = (uint32_t)temp_len; + + if(_libssh2_get_u32(&buf, &(listen_state->sport))) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short extracting sport"); + } _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Remote received connection from %s:%ld to %s:%ld", @@ -118,9 +141,9 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, listen_state->state = libssh2_NB_state_allocated; } - if (listen_state->state != libssh2_NB_state_sent) { - while (listn) { - if ((listn->port == (int) listen_state->port) && + if(listen_state->state != libssh2_NB_state_sent) { + while(listn) { + if((listn->port == (int) listen_state->port) && (strlen(listn->host) == listen_state->host_len) && (memcmp (listn->host, listen_state->host, listen_state->host_len) == 0)) { @@ -128,8 +151,8 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, LIBSSH2_CHANNEL *channel = NULL; listen_state->channel = NULL; - if (listen_state->state == libssh2_NB_state_allocated) { - if (listn->queue_maxsize && + if(listen_state->state == libssh2_NB_state_allocated) { + if(listn->queue_maxsize && (listn->queue_maxsize <= listn->queue_size)) { /* Queue is full */ failure_code = SSH_OPEN_RESOURCE_SHORTAGE; @@ -140,7 
+163,7 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, } channel = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_CHANNEL)); - if (!channel) { + if(!channel) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a channel for " "new connection"); @@ -156,7 +179,7 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, channel-> channel_type_len + 1); - if (!channel->channel_type) { + if(!channel->channel_type) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a channel for new" " connection"); @@ -203,12 +226,12 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, listen_state->state = libssh2_NB_state_created; } - if (listen_state->state == libssh2_NB_state_created) { + if(listen_state->state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, listen_state->packet, 17, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - else if (rc) { + else if(rc) { listen_state->state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send channel " @@ -216,7 +239,7 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, } /* Link the channel into the end of the queue list */ - if (listen_state->channel) { + if(listen_state->channel) { _libssh2_list_add(&listn->queue, &listen_state->channel->node); listn->queue_size++; @@ -243,9 +266,10 @@ packet_queue_listener(LIBSSH2_SESSION * session, unsigned char *data, rc = _libssh2_transport_send(session, listen_state->packet, packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { listen_state->state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send open failure"); @@ -271,21 +295,56 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data, LIBSSH2_CHANNEL *channel = x11open_state->channel; int rc; - (void) datalen; - - if (x11open_state->state 
== libssh2_NB_state_idle) { - unsigned char *s = data + (sizeof("x11") - 1) + 5; - x11open_state->sender_channel = _libssh2_ntohu32(s); - s += 4; - x11open_state->initial_window_size = _libssh2_ntohu32(s); - s += 4; - x11open_state->packet_size = _libssh2_ntohu32(s); - s += 4; - x11open_state->shost_len = _libssh2_ntohu32(s); - s += 4; - x11open_state->shost = s; - s += x11open_state->shost_len; - x11open_state->sport = _libssh2_ntohu32(s); + if(x11open_state->state == libssh2_NB_state_idle) { + + unsigned long offset = (sizeof("x11") - 1) + 5; + size_t temp_len = 0; + struct string_buf buf; + buf.data = data; + buf.dataptr = buf.data; + buf.len = datalen; + + if(datalen < offset) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected data length"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } + + buf.dataptr += offset; + + if(_libssh2_get_u32(&buf, &(x11open_state->sender_channel))) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected sender channel size"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } + if(_libssh2_get_u32(&buf, &(x11open_state->initial_window_size))) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected window size"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } + if(_libssh2_get_u32(&buf, &(x11open_state->packet_size))) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected window size"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } + if(_libssh2_get_string(&buf, &(x11open_state->shost), &temp_len)) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected host size"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } + x11open_state->shost_len = (uint32_t)temp_len; + + if(_libssh2_get_u32(&buf, &(x11open_state->sport))) { + _libssh2_error(session, LIBSSH2_ERROR_INVAL, + "unexpected port size"); + failure_code = SSH_OPEN_CONNECT_FAILED; + goto x11_exit; + } _libssh2_debug(session, LIBSSH2_TRACE_CONN, "X11 Connection 
Received from %s:%ld on channel %lu", @@ -295,10 +354,10 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data, x11open_state->state = libssh2_NB_state_allocated; } - if (session->x11) { - if (x11open_state->state == libssh2_NB_state_allocated) { + if(session->x11) { + if(x11open_state->state == libssh2_NB_state_allocated) { channel = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_CHANNEL)); - if (!channel) { + if(!channel) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "allocate a channel for new connection"); failure_code = SSH_OPEN_RESOURCE_SHORTAGE; @@ -310,7 +369,7 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data, channel->channel_type = LIBSSH2_ALLOC(session, channel->channel_type_len + 1); - if (!channel->channel_type) { + if(!channel->channel_type) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "allocate a channel for new connection"); LIBSSH2_FREE(session, channel); @@ -350,12 +409,13 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data, x11open_state->state = libssh2_NB_state_created; } - if (x11open_state->state == libssh2_NB_state_created) { + if(x11open_state->state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, x11open_state->packet, 17, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { x11open_state->state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send channel open " @@ -389,9 +449,10 @@ packet_x11_open(LIBSSH2_SESSION * session, unsigned char *data, rc = _libssh2_transport_send(session, x11open_state->packet, packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { x11open_state->state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Unable to send open failure"); } @@ -416,10 +477,10 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, size_t 
datalen, int macstate) { int rc = 0; - char *message=NULL; - char *language=NULL; - size_t message_len=0; - size_t language_len=0; + unsigned char *message = NULL; + unsigned char *language = NULL; + size_t message_len = 0; + size_t language_len = 0; LIBSSH2_CHANNEL *channelp = NULL; size_t data_head = 0; unsigned char msg = data[0]; @@ -430,7 +491,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, "Packet type %d received, length=%d", (int) msg, (int) datalen); - if ((macstate == LIBSSH2_MAC_INVALID) && + if((macstate == LIBSSH2_MAC_INVALID) && (!session->macerror || LIBSSH2_MACERROR(session, (char *) data, datalen))) { /* Bad MAC input, but no callback set or non-zero return from the @@ -456,9 +517,9 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, break; } - if (session->packAdd_state == libssh2_NB_state_allocated) { + if(session->packAdd_state == libssh2_NB_state_allocated) { /* A couple exceptions to the packet adding rule: */ - switch (msg) { + switch(msg) { /* byte SSH_MSG_DISCONNECT @@ -469,32 +530,23 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, case SSH_MSG_DISCONNECT: if(datalen >= 5) { - size_t reason = _libssh2_ntohu32(data + 1); - - if(datalen >= 9) { - message_len = _libssh2_ntohu32(data + 5); - - if(message_len < datalen-13) { - /* 9 = packet_type(1) + reason(4) + message_len(4) */ - message = (char *) data + 9; - - language_len = _libssh2_ntohu32(data + 9 + message_len); - language = (char *) data + 9 + message_len + 4; - - if(language_len > (datalen-13-message_len)) { - /* bad input, clear info */ - language = message = NULL; - language_len = message_len = 0; - } - } - else - /* bad size, clear it */ - message_len=0; - } - if (session->ssh_msg_disconnect) { - LIBSSH2_DISCONNECT(session, reason, message, - message_len, language, language_len); + uint32_t reason = 0; + struct string_buf buf; + buf.data = (unsigned char *)data; + buf.dataptr = buf.data; + buf.len = datalen; + 
buf.dataptr++; /* advance past type */ + + _libssh2_get_u32(&buf, &reason); + _libssh2_get_string(&buf, &message, &message_len); + _libssh2_get_string(&buf, &language, &language_len); + + if(session->ssh_msg_disconnect) { + LIBSSH2_DISCONNECT(session, reason, (const char *)message, + message_len, (const char *)language, + language_len); } + _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Disconnect(%d): %s(%s)", reason, message, language); @@ -511,11 +563,12 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, */ case SSH_MSG_IGNORE: - if (datalen >= 2) { - if (session->ssh_msg_ignore) { + if(datalen >= 2) { + if(session->ssh_msg_ignore) { LIBSSH2_IGNORE(session, (char *) data + 1, datalen - 1); } - } else if (session->ssh_msg_ignore) { + } + else if(session->ssh_msg_ignore) { LIBSSH2_IGNORE(session, "", 0); } LIBSSH2_FREE(session, data); @@ -531,26 +584,27 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, case SSH_MSG_DEBUG: if(datalen >= 2) { - int always_display= data[1]; + int always_display = data[1]; if(datalen >= 6) { - message_len = _libssh2_ntohu32(data + 2); - - if(message_len <= (datalen - 10)) { - /* 6 = packet_type(1) + display(1) + message_len(4) */ - message = (char *) data + 6; - language_len = _libssh2_ntohu32(data + 6 + message_len); - - if(language_len <= (datalen - 10 - message_len)) - language = (char *) data + 10 + message_len; - } + struct string_buf buf; + buf.data = (unsigned char *)data; + buf.dataptr = buf.data; + buf.len = datalen; + buf.dataptr += 2; /* advance past type & always display */ + + _libssh2_get_string(&buf, &message, &message_len); + _libssh2_get_string(&buf, &language, &language_len); } - if (session->ssh_msg_debug) { - LIBSSH2_DEBUG(session, always_display, message, - message_len, language, language_len); + if(session->ssh_msg_debug) { + LIBSSH2_DEBUG(session, always_display, + (const char *)message, + message_len, (const char *)language, + language_len); } } + /* * _libssh2_debug will 
actually truncate this for us so * that it's not an inordinate about of data @@ -561,6 +615,75 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, session->packAdd_state = libssh2_NB_state_idle; return 0; + /* + byte SSH_MSG_EXT_INFO + uint32 nr-extensions + [repeat "nr-extensions" times] + string extension-name [RFC8308] + string extension-value (binary) + */ + + case SSH_MSG_EXT_INFO: + if(datalen >= 5) { + uint32_t nr_extensions = 0; + struct string_buf buf; + buf.data = (unsigned char *)data; + buf.dataptr = buf.data; + buf.len = datalen; + buf.dataptr += 1; /* advance past type */ + + if(_libssh2_get_u32(&buf, &nr_extensions) != 0) { + rc = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Invalid extension info received"); + } + + while(rc == 0 && nr_extensions > 0) { + + size_t name_len = 0; + size_t value_len = 0; + unsigned char *name = NULL; + unsigned char *value = NULL; + + nr_extensions -= 1; + + _libssh2_get_string(&buf, &name, &name_len); + _libssh2_get_string(&buf, &value, &value_len); + + if(name != NULL && value != NULL) { + _libssh2_debug(session, + LIBSSH2_TRACE_KEX, + "Server to Client extension %.*s: %.*s", + name_len, name, value_len, value); + } + + if(name_len == 15 && + memcmp(name, "server-sig-algs", 15) == 0) { + if(session->server_sign_algorithms) { + LIBSSH2_FREE(session, + session->server_sign_algorithms); + } + + session->server_sign_algorithms = + LIBSSH2_ALLOC(session, + value_len + 1); + + if(session->server_sign_algorithms) { + memcpy(session->server_sign_algorithms, + value, value_len); + session->server_sign_algorithms[value_len] = '\0'; + } + else { + rc = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "memory for server sign algo"); + } + } + } + } + + LIBSSH2_FREE(session, data); + session->packAdd_state = libssh2_NB_state_idle; + return rc; + /* byte SSH_MSG_GLOBAL_REQUEST string request name in US-ASCII only @@ -570,10 +693,10 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, case 
SSH_MSG_GLOBAL_REQUEST: if(datalen >= 5) { - uint32_t len =0; - unsigned char want_reply=0; + uint32_t len = 0; + unsigned char want_reply = 0; len = _libssh2_ntohu32(data + 1); - if(datalen >= (6 + len)) { + if((len <= (UINT_MAX - 6)) && (datalen >= (6 + len))) { want_reply = data[5 + len]; _libssh2_debug(session, LIBSSH2_TRACE_CONN, @@ -582,13 +705,13 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, } - if (want_reply) { + if(want_reply) { static const unsigned char packet = SSH_MSG_REQUEST_FAILURE; libssh2_packet_add_jump_point5: session->packAdd_state = libssh2_NB_state_jump5; rc = _libssh2_transport_send(session, &packet, 1, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } } @@ -624,7 +747,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, _libssh2_channel_locate(session, _libssh2_ntohu32(data + 1)); - if (!channelp) { + if(!channelp) { _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_UNKNOWN, "Packet received for unknown channel"); LIBSSH2_FREE(session, data); @@ -634,7 +757,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, #ifdef LIBSSH2DEBUG { uint32_t stream_id = 0; - if (msg == SSH_MSG_CHANNEL_EXTENDED_DATA) + if(msg == SSH_MSG_CHANNEL_EXTENDED_DATA) stream_id = _libssh2_ntohu32(data + 5); _libssh2_debug(session, LIBSSH2_TRACE_CONN, @@ -645,7 +768,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, stream_id); } #endif - if ((channelp->remote.extended_data_ignore_mode == + if((channelp->remote.extended_data_ignore_mode == LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE) && (msg == SSH_MSG_CHANNEL_EXTENDED_DATA)) { /* Pretend we didn't receive this */ @@ -654,14 +777,15 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Ignoring extended data and refunding %d bytes", (int) (datalen - 13)); - if (channelp->read_avail + datalen - data_head >= + if(channelp->read_avail + datalen - 
data_head >= channelp->remote.window_size) datalen = channelp->remote.window_size - channelp->read_avail + data_head; channelp->remote.window_size -= datalen - data_head; _libssh2_debug(session, LIBSSH2_TRACE_CONN, - "shrinking window size by %lu bytes to %lu, read_avail %lu", + "shrinking window size by %lu bytes to %lu, " + "read_avail %lu", datalen - data_head, channelp->remote.window_size, channelp->read_avail); @@ -675,7 +799,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, packAdd_channelp, datalen - 13, 1, NULL); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; session->packAdd_state = libssh2_NB_state_idle; @@ -686,7 +810,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, * REMEMBER! remote means remote as source of data, * NOT remote window! */ - if (channelp->remote.packet_size < (datalen - data_head)) { + if(channelp->remote.packet_size < (datalen - data_head)) { /* * Spec says we MAY ignore bytes sent beyond * packet_size @@ -697,7 +821,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, " to receive, truncating"); datalen = channelp->remote.packet_size + data_head; } - if (channelp->remote.window_size <= channelp->read_avail) { + if(channelp->remote.window_size <= channelp->read_avail) { /* * Spec says we MAY ignore bytes sent beyond * window_size @@ -713,7 +837,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, /* Reset EOF status */ channelp->remote.eof = 0; - if (channelp->read_avail + datalen - data_head > + if(channelp->read_avail + datalen - data_head > channelp->remote.window_size) { _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_WINDOW_EXCEEDED, @@ -746,7 +870,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, channelp = _libssh2_channel_locate(session, _libssh2_ntohu32(data + 1)); - if (!channelp) + if(!channelp) /* We may have freed already, just quietly ignore this... 
*/ ; else { @@ -775,15 +899,16 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, uint32_t len = _libssh2_ntohu32(data + 5); unsigned char want_reply = 1; - if(len < (datalen - 10)) - want_reply = data[9 + len]; + if((len + 9) < datalen) + want_reply = data[len + 9]; _libssh2_debug(session, LIBSSH2_TRACE_CONN, "Channel %d received request type %.*s (wr %X)", channel, len, data + 9, want_reply); - if (len == sizeof("exit-status") - 1 + if(len == sizeof("exit-status") - 1 + && (sizeof("exit-status") - 1 + 9) <= datalen && !memcmp("exit-status", data + 9, sizeof("exit-status") - 1)) { @@ -792,7 +917,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, channelp = _libssh2_channel_locate(session, channel); - if (channelp) { + if(channelp && (sizeof("exit-status") + 13) <= datalen) { channelp->exit_status = _libssh2_ntohu32(data + 9 + sizeof("exit-status")); _libssh2_debug(session, LIBSSH2_TRACE_CONN, @@ -804,25 +929,34 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, } } - else if (len == sizeof("exit-signal") - 1 + else if(len == sizeof("exit-signal") - 1 + && (sizeof("exit-signal") - 1 + 9) <= datalen && !memcmp("exit-signal", data + 9, sizeof("exit-signal") - 1)) { /* command terminated due to signal */ if(datalen >= 20) channelp = _libssh2_channel_locate(session, channel); - if (channelp) { + if(channelp && (sizeof("exit-signal") + 13) <= datalen) { /* set signal name (without SIG prefix) */ uint32_t namelen = _libssh2_ntohu32(data + 9 + sizeof("exit-signal")); - channelp->exit_signal = - LIBSSH2_ALLOC(session, namelen + 1); - if (!channelp->exit_signal) + + if(namelen <= UINT_MAX - 1) { + channelp->exit_signal = + LIBSSH2_ALLOC(session, namelen + 1); + } + else { + channelp->exit_signal = NULL; + } + + if(!channelp->exit_signal) rc = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "memory for signal name"); - else { + else if((sizeof("exit-signal") + 13 + namelen <= + datalen)) { 
memcpy(channelp->exit_signal, - data + 13 + sizeof("exit_signal"), namelen); + data + 13 + sizeof("exit-signal"), namelen); channelp->exit_signal[namelen] = '\0'; /* TODO: save error message and language tag */ _libssh2_debug(session, LIBSSH2_TRACE_CONN, @@ -836,14 +970,14 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, } - if (want_reply) { + if(want_reply) { unsigned char packet[5]; libssh2_packet_add_jump_point4: session->packAdd_state = libssh2_NB_state_jump4; packet[0] = SSH_MSG_CHANNEL_FAILURE; - memcpy(&packet[1], data+1, 4); + memcpy(&packet[1], data + 1, 4); rc = _libssh2_transport_send(session, packet, 5, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } } @@ -861,7 +995,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, channelp = _libssh2_channel_locate(session, _libssh2_ntohu32(data + 1)); - if (!channelp) { + if(!channelp) { /* We may have freed already, just quietly ignore this... */ LIBSSH2_FREE(session, data); session->packAdd_state = libssh2_NB_state_idle; @@ -890,7 +1024,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, case SSH_MSG_CHANNEL_OPEN: if(datalen < 17) ; - else if ((datalen >= (sizeof("forwarded-tcpip") + 4)) && + else if((datalen >= (sizeof("forwarded-tcpip") + 4)) && ((sizeof("forwarded-tcpip") - 1) == _libssh2_ntohu32(data + 1)) && @@ -906,7 +1040,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, rc = packet_queue_listener(session, data, datalen, &session->packAdd_Qlstn_state); } - else if ((datalen >= (sizeof("x11") + 4)) && + else if((datalen >= (sizeof("x11") + 4)) && ((sizeof("x11") - 1) == _libssh2_ntohu32(data + 1)) && (memcmp(data + 5, "x11", sizeof("x11") - 1) == 0)) { @@ -919,7 +1053,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, rc = packet_x11_open(session, data, datalen, &session->packAdd_x11open_state); } - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == 
LIBSSH2_ERROR_EAGAIN) return rc; LIBSSH2_FREE(session, data); @@ -961,10 +1095,10 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, session->packAdd_state = libssh2_NB_state_sent; } - if (session->packAdd_state == libssh2_NB_state_sent) { + if(session->packAdd_state == libssh2_NB_state_sent) { LIBSSH2_PACKET *packetp = LIBSSH2_ALLOC(session, sizeof(LIBSSH2_PACKET)); - if (!packetp) { + if(!packetp) { _libssh2_debug(session, LIBSSH2_ERROR_ALLOC, "memory for packet"); LIBSSH2_FREE(session, data); @@ -980,10 +1114,10 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, session->packAdd_state = libssh2_NB_state_sent1; } - if ((msg == SSH_MSG_KEXINIT && + if((msg == SSH_MSG_KEXINIT && !(session->state & LIBSSH2_STATE_EXCHANGING_KEYS)) || (session->packAdd_state == libssh2_NB_state_sent2)) { - if (session->packAdd_state == libssh2_NB_state_sent1) { + if(session->packAdd_state == libssh2_NB_state_sent1) { /* * Remote wants new keys * Well, it's already in the brigade, @@ -1012,7 +1146,7 @@ _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, * send NEWKEYS yet, otherwise remote will drop us like a rock */ rc = _libssh2_kex_exchange(session, 1, &session->startup_key_state); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } @@ -1037,8 +1171,8 @@ _libssh2_packet_ask(LIBSSH2_SESSION * session, unsigned char packet_type, _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Looking for packet of type: %d", (int) packet_type); - while (packet) { - if (packet->data[0] == packet_type + while(packet) { + if(packet->data[0] == packet_type && (packet->data_len >= (match_ofs + match_len)) && (!match_buf || (memcmp(packet->data + match_ofs, match_buf, @@ -1075,7 +1209,7 @@ _libssh2_packet_askv(LIBSSH2_SESSION * session, int i, packet_types_len = strlen((char *) packet_types); for(i = 0; i < packet_types_len; i++) { - if (0 == _libssh2_packet_ask(session, packet_types[i], data, + if(0 == 
_libssh2_packet_ask(session, packet_types[i], data, data_len, match_ofs, match_buf, match_len)) { return 0; @@ -1102,8 +1236,8 @@ _libssh2_packet_require(LIBSSH2_SESSION * session, unsigned char packet_type, size_t match_len, packet_require_state_t *state) { - if (state->start == 0) { - if (_libssh2_packet_ask(session, packet_type, data, data_len, + if(state->start == 0) { + if(_libssh2_packet_ask(session, packet_type, data, data_len, match_ofs, match_buf, match_len) == 0) { /* A packet was available in the packet brigade */ @@ -1113,26 +1247,28 @@ _libssh2_packet_require(LIBSSH2_SESSION * session, unsigned char packet_type, state->start = time(NULL); } - while (session->socket_state == LIBSSH2_SOCKET_CONNECTED) { + while(session->socket_state == LIBSSH2_SOCKET_CONNECTED) { int ret = _libssh2_transport_read(session); - if (ret == LIBSSH2_ERROR_EAGAIN) + if(ret == LIBSSH2_ERROR_EAGAIN) return ret; - else if (ret < 0) { + else if(ret < 0) { state->start = 0; /* an error which is not just because of blocking */ return ret; - } else if (ret == packet_type) { + } + else if(ret == packet_type) { /* Be lazy, let packet_ask pull it out of the brigade */ ret = _libssh2_packet_ask(session, packet_type, data, data_len, match_ofs, match_buf, match_len); state->start = 0; return ret; - } else if (ret == 0) { + } + else if(ret == 0) { /* nothing available, wait until data arrives or we time out */ long left = LIBSSH2_READ_TIMEOUT - (long)(time(NULL) - state->start); - if (left <= 0) { + if(left <= 0) { state->start = 0; return LIBSSH2_ERROR_TIMEOUT; } @@ -1160,13 +1296,13 @@ _libssh2_packet_burn(LIBSSH2_SESSION * session, unsigned char i, all_packets[255]; int ret; - if (*state == libssh2_NB_state_idle) { + if(*state == libssh2_NB_state_idle) { for(i = 1; i < 255; i++) { all_packets[i - 1] = i; } all_packets[254] = 0; - if (_libssh2_packet_askv(session, all_packets, &data, &data_len, 0, + if(_libssh2_packet_askv(session, all_packets, &data, &data_len, 0, NULL, 0) == 0) { i = 
data[0]; /* A packet was available in the packet brigade, burn it */ @@ -1179,20 +1315,22 @@ _libssh2_packet_burn(LIBSSH2_SESSION * session, *state = libssh2_NB_state_created; } - while (session->socket_state == LIBSSH2_SOCKET_CONNECTED) { + while(session->socket_state == LIBSSH2_SOCKET_CONNECTED) { ret = _libssh2_transport_read(session); - if (ret == LIBSSH2_ERROR_EAGAIN) { + if(ret == LIBSSH2_ERROR_EAGAIN) { return ret; - } else if (ret < 0) { + } + else if(ret < 0) { *state = libssh2_NB_state_idle; return ret; - } else if (ret == 0) { + } + else if(ret == 0) { /* FIXME: this might busyloop */ continue; } /* Be lazy, let packet_ask pull it out of the brigade */ - if (0 == + if(0 == _libssh2_packet_ask(session, (unsigned char)ret, &data, &data_len, 0, NULL, 0)) { /* Smoke 'em if you got 'em */ @@ -1222,41 +1360,43 @@ _libssh2_packet_requirev(LIBSSH2_SESSION *session, const unsigned char *match_buf, size_t match_len, packet_requirev_state_t * state) { - if (_libssh2_packet_askv(session, packet_types, data, data_len, match_ofs, + if(_libssh2_packet_askv(session, packet_types, data, data_len, match_ofs, match_buf, match_len) == 0) { /* One of the packets listed was available in the packet brigade */ state->start = 0; return 0; } - if (state->start == 0) { + if(state->start == 0) { state->start = time(NULL); } - while (session->socket_state != LIBSSH2_SOCKET_DISCONNECTED) { + while(session->socket_state != LIBSSH2_SOCKET_DISCONNECTED) { int ret = _libssh2_transport_read(session); - if ((ret < 0) && (ret != LIBSSH2_ERROR_EAGAIN)) { + if((ret < 0) && (ret != LIBSSH2_ERROR_EAGAIN)) { state->start = 0; return ret; } - if (ret <= 0) { + if(ret <= 0) { long left = LIBSSH2_READ_TIMEOUT - (long)(time(NULL) - state->start); - if (left <= 0) { + if(left <= 0) { state->start = 0; return LIBSSH2_ERROR_TIMEOUT; } - else if (ret == LIBSSH2_ERROR_EAGAIN) { + else if(ret == LIBSSH2_ERROR_EAGAIN) { return ret; } } - if (strchr((char *) packet_types, ret)) { + if(strchr((char *) 
packet_types, ret)) { /* Be lazy, let packet_ask pull it out of the brigade */ - return _libssh2_packet_askv(session, packet_types, data, + int ret = _libssh2_packet_askv(session, packet_types, data, data_len, match_ofs, match_buf, match_len); + state->start = 0; + return ret; } } diff --git a/vendor/libssh2/src/packet.h b/vendor/libssh2/src/packet.h index d66b15b50d..79018bcf1d 100644 --- a/vendor/libssh2/src/packet.h +++ b/vendor/libssh2/src/packet.h @@ -1,5 +1,5 @@ -#ifndef LIBSSH2_PACKET_H -#define LIBSSH2_PACKET_H +#ifndef __LIBSSH2_PACKET_H +#define __LIBSSH2_PACKET_H /* * Copyright (C) 2010 by Daniel Stenberg * Author: Daniel Stenberg @@ -73,4 +73,4 @@ int _libssh2_packet_write(LIBSSH2_SESSION * session, unsigned char *data, int _libssh2_packet_add(LIBSSH2_SESSION * session, unsigned char *data, size_t datalen, int macstate); -#endif /* LIBSSH2_PACKET_H */ +#endif /* __LIBSSH2_PACKET_H */ diff --git a/vendor/libssh2/src/pem.c b/vendor/libssh2/src/pem.c index 9f51bba3b5..3416bd528a 100644 --- a/vendor/libssh2/src/pem.c +++ b/vendor/libssh2/src/pem.c @@ -43,23 +43,23 @@ readline(char *line, int line_size, FILE * fp) { size_t len; - if (!line) { + if(!line) { return -1; } - if (!fgets(line, line_size, fp)) { + if(!fgets(line, line_size, fp)) { return -1; } - if (*line) { + if(*line) { len = strlen(line); - if (len > 0 && line[len - 1] == '\n') { + if(len > 0 && line[len - 1] == '\n') { line[len - 1] = '\0'; } } - if (*line) { + if(*line) { len = strlen(line); - if (len > 0 && line[len - 1] == '\r') { + if(len > 0 && line[len - 1] == '\r') { line[len - 1] = '\0'; } } @@ -76,14 +76,14 @@ readline_memory(char *line, size_t line_size, off = *filedata_offset; - for (len = 0; off + len < filedata_len && len < line_size; len++) { - if (filedata[off + len] == '\n' || + for(len = 0; off + len < filedata_len && len < line_size - 1; len++) { + if(filedata[off + len] == '\n' || filedata[off + len] == '\r') { break; } } - if (len) { + if(len) { memcpy(line, filedata + off, 
len); *filedata_offset += len; } @@ -96,36 +96,88 @@ readline_memory(char *line, size_t line_size, #define LINE_SIZE 128 +static const char *crypt_annotation = "Proc-Type: 4,ENCRYPTED"; + +static unsigned char hex_decode(char digit) +{ + return (digit >= 'A') ? 0xA + (digit - 'A') : (digit - '0'); +} + int _libssh2_pem_parse(LIBSSH2_SESSION * session, const char *headerbegin, const char *headerend, + const unsigned char *passphrase, FILE * fp, unsigned char **data, unsigned int *datalen) { char line[LINE_SIZE]; + unsigned char iv[LINE_SIZE]; char *b64data = NULL; unsigned int b64datalen = 0; int ret; + const LIBSSH2_CRYPT_METHOD *method = NULL; do { *line = '\0'; - if (readline(line, LINE_SIZE, fp)) { + if(readline(line, LINE_SIZE, fp)) { return -1; } } - while (strcmp(line, headerbegin) != 0); + while(strcmp(line, headerbegin) != 0); - *line = '\0'; + if(readline(line, LINE_SIZE, fp)) { + return -1; + } + + if(passphrase && + memcmp(line, crypt_annotation, strlen(crypt_annotation)) == 0) { + const LIBSSH2_CRYPT_METHOD **all_methods, *cur_method; + int i; + + if(readline(line, LINE_SIZE, fp)) { + ret = -1; + goto out; + } + + all_methods = libssh2_crypt_methods(); + while((cur_method = *all_methods++)) { + if(*cur_method->pem_annotation && + memcmp(line, cur_method->pem_annotation, + strlen(cur_method->pem_annotation)) == 0) { + method = cur_method; + memcpy(iv, line + strlen(method->pem_annotation) + 1, + 2*method->iv_len); + } + } + + /* None of the available crypt methods were able to decrypt the key */ + if(method == NULL) + return -1; + + /* Decode IV from hex */ + for(i = 0; i < method->iv_len; ++i) { + iv[i] = hex_decode(iv[2*i]) << 4; + iv[i] |= hex_decode(iv[2*i + 1]); + } + + /* skip to the next line */ + if(readline(line, LINE_SIZE, fp)) { + ret = -1; + goto out; + } + } do { - if (*line) { + if(*line) { char *tmp; size_t linelen; linelen = strlen(line); tmp = LIBSSH2_REALLOC(session, b64data, b64datalen + linelen); - if (!tmp) { + if(!tmp) { + 
_libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for PEM parsing"); ret = -1; goto out; } @@ -136,25 +188,102 @@ _libssh2_pem_parse(LIBSSH2_SESSION * session, *line = '\0'; - if (readline(line, LINE_SIZE, fp)) { + if(readline(line, LINE_SIZE, fp)) { ret = -1; goto out; } - } while (strcmp(line, headerend) != 0); + } while(strcmp(line, headerend) != 0); - if (!b64data) { + if(!b64data) { return -1; } - if (libssh2_base64_decode(session, (char**) data, datalen, + if(libssh2_base64_decode(session, (char **) data, datalen, b64data, b64datalen)) { ret = -1; goto out; } + if(method) { + /* Set up decryption */ + int free_iv = 0, free_secret = 0, len_decrypted = 0, padding = 0; + int blocksize = method->blocksize; + void *abstract; + unsigned char secret[2*MD5_DIGEST_LENGTH]; + libssh2_md5_ctx fingerprint_ctx; + + /* Perform key derivation (PBKDF1/MD5) */ + if(!libssh2_md5_init(&fingerprint_ctx)) { + ret = -1; + goto out; + } + libssh2_md5_update(fingerprint_ctx, passphrase, + strlen((char *)passphrase)); + libssh2_md5_update(fingerprint_ctx, iv, 8); + libssh2_md5_final(fingerprint_ctx, secret); + if(method->secret_len > MD5_DIGEST_LENGTH) { + if(!libssh2_md5_init(&fingerprint_ctx)) { + ret = -1; + goto out; + } + libssh2_md5_update(fingerprint_ctx, secret, MD5_DIGEST_LENGTH); + libssh2_md5_update(fingerprint_ctx, passphrase, + strlen((char *)passphrase)); + libssh2_md5_update(fingerprint_ctx, iv, 8); + libssh2_md5_final(fingerprint_ctx, secret + MD5_DIGEST_LENGTH); + } + + /* Initialize the decryption */ + if(method->init(session, method, iv, &free_iv, secret, + &free_secret, 0, &abstract)) { + _libssh2_explicit_zero((char *)secret, sizeof(secret)); + LIBSSH2_FREE(session, data); + ret = -1; + goto out; + } + + if(free_secret) { + _libssh2_explicit_zero((char *)secret, sizeof(secret)); + } + + /* Do the actual decryption */ + if((*datalen % blocksize) != 0) { + _libssh2_explicit_zero((char *)secret, sizeof(secret)); + method->dtor(session, 
&abstract); + _libssh2_explicit_zero(*data, *datalen); + LIBSSH2_FREE(session, *data); + ret = -1; + goto out; + } + + while(len_decrypted <= (int)*datalen - blocksize) { + if(method->crypt(session, *data + len_decrypted, blocksize, + &abstract)) { + ret = LIBSSH2_ERROR_DECRYPT; + _libssh2_explicit_zero((char *)secret, sizeof(secret)); + method->dtor(session, &abstract); + _libssh2_explicit_zero(*data, *datalen); + LIBSSH2_FREE(session, *data); + goto out; + } + + len_decrypted += blocksize; + } + + /* Account for padding */ + padding = (*data)[*datalen - 1]; + memset(&(*data)[*datalen-padding], 0, padding); + *datalen -= padding; + + /* Clean up */ + _libssh2_explicit_zero((char *)secret, sizeof(secret)); + method->dtor(session, &abstract); + } + ret = 0; out: - if (b64data) { + if(b64data) { + _libssh2_explicit_zero(b64data, b64datalen); LIBSSH2_FREE(session, b64data); } return ret; @@ -176,22 +305,24 @@ _libssh2_pem_parse_memory(LIBSSH2_SESSION * session, do { *line = '\0'; - if (readline_memory(line, LINE_SIZE, filedata, filedata_len, &off)) { + if(readline_memory(line, LINE_SIZE, filedata, filedata_len, &off)) { return -1; } } - while (strcmp(line, headerbegin) != 0); + while(strcmp(line, headerbegin) != 0); *line = '\0'; do { - if (*line) { + if(*line) { char *tmp; size_t linelen; linelen = strlen(line); tmp = LIBSSH2_REALLOC(session, b64data, b64datalen + linelen); - if (!tmp) { + if(!tmp) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for PEM parsing"); ret = -1; goto out; } @@ -202,17 +333,17 @@ _libssh2_pem_parse_memory(LIBSSH2_SESSION * session, *line = '\0'; - if (readline_memory(line, LINE_SIZE, filedata, filedata_len, &off)) { + if(readline_memory(line, LINE_SIZE, filedata, filedata_len, &off)) { ret = -1; goto out; } - } while (strcmp(line, headerend) != 0); + } while(strcmp(line, headerend) != 0); - if (!b64data) { + if(!b64data) { return -1; } - if (libssh2_base64_decode(session, (char**) data, datalen, + 
if(libssh2_base64_decode(session, (char **) data, datalen, b64data, b64datalen)) { ret = -1; goto out; @@ -220,12 +351,467 @@ _libssh2_pem_parse_memory(LIBSSH2_SESSION * session, ret = 0; out: - if (b64data) { + if(b64data) { + _libssh2_explicit_zero(b64data, b64datalen); + LIBSSH2_FREE(session, b64data); + } + return ret; +} + +/* OpenSSH formatted keys */ +#define AUTH_MAGIC "openssh-key-v1" +#define OPENSSH_HEADER_BEGIN "-----BEGIN OPENSSH PRIVATE KEY-----" +#define OPENSSH_HEADER_END "-----END OPENSSH PRIVATE KEY-----" + +static int +_libssh2_openssh_pem_parse_data(LIBSSH2_SESSION * session, + const unsigned char *passphrase, + const char *b64data, size_t b64datalen, + struct string_buf **decrypted_buf) +{ + const LIBSSH2_CRYPT_METHOD *method = NULL; + struct string_buf decoded, decrypted, kdf_buf; + unsigned char *ciphername = NULL; + unsigned char *kdfname = NULL; + unsigned char *kdf = NULL; + unsigned char *buf = NULL; + unsigned char *salt = NULL; + uint32_t nkeys, check1, check2; + uint32_t rounds = 0; + unsigned char *key = NULL; + unsigned char *key_part = NULL; + unsigned char *iv_part = NULL; + unsigned char *f = NULL; + unsigned int f_len = 0; + int ret = 0, keylen = 0, ivlen = 0, total_len = 0; + size_t kdf_len = 0, tmp_len = 0, salt_len = 0; + + if(decrypted_buf) + *decrypted_buf = NULL; + + /* decode file */ + if(libssh2_base64_decode(session, (char **)&f, &f_len, + b64data, b64datalen)) { + ret = -1; + goto out; + } + + /* Parse the file */ + decoded.data = (unsigned char *)f; + decoded.dataptr = (unsigned char *)f; + decoded.len = f_len; + + if(decoded.len < strlen(AUTH_MAGIC)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, "key too short"); + goto out; + } + + if(strncmp((char *) decoded.dataptr, AUTH_MAGIC, + strlen(AUTH_MAGIC)) != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "key auth magic mismatch"); + goto out; + } + + decoded.dataptr += strlen(AUTH_MAGIC) + 1; + + if(_libssh2_get_string(&decoded, &ciphername, 
&tmp_len) || + tmp_len == 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "ciphername is missing"); + goto out; + } + + if(_libssh2_get_string(&decoded, &kdfname, &tmp_len) || + tmp_len == 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "kdfname is missing"); + goto out; + } + + if(_libssh2_get_string(&decoded, &kdf, &kdf_len)) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "kdf is missing"); + goto out; + } + else { + kdf_buf.data = kdf; + kdf_buf.dataptr = kdf; + kdf_buf.len = kdf_len; + } + + if((passphrase == NULL || strlen((const char *)passphrase) == 0) && + strcmp((const char *)ciphername, "none") != 0) { + /* passphrase required */ + ret = LIBSSH2_ERROR_KEYFILE_AUTH_FAILED; + goto out; + } + + if(strcmp((const char *)kdfname, "none") != 0 && + strcmp((const char *)kdfname, "bcrypt") != 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "unknown cipher"); + goto out; + } + + if(!strcmp((const char *)kdfname, "none") && + strcmp((const char *)ciphername, "none") != 0) { + ret =_libssh2_error(session, LIBSSH2_ERROR_PROTO, + "invalid format"); + goto out; + } + + if(_libssh2_get_u32(&decoded, &nkeys) != 0 || nkeys != 1) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Multiple keys are unsupported"); + goto out; + } + + /* unencrypted public key */ + + if(_libssh2_get_string(&decoded, &buf, &tmp_len) || tmp_len == 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Invalid private key; " + "expect embedded public key"); + goto out; + } + + if(_libssh2_get_string(&decoded, &buf, &tmp_len) || tmp_len == 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Private key data not found"); + goto out; + } + + /* decode encrypted private key */ + decrypted.data = decrypted.dataptr = buf; + decrypted.len = tmp_len; + + if(ciphername && strcmp((const char *)ciphername, "none") != 0) { + const LIBSSH2_CRYPT_METHOD **all_methods, *cur_method; + + all_methods = libssh2_crypt_methods(); + while((cur_method 
= *all_methods++)) { + if(*cur_method->name && + memcmp(ciphername, cur_method->name, + strlen(cur_method->name)) == 0) { + method = cur_method; + } + } + + /* None of the available crypt methods were able to decrypt the key */ + + if(method == NULL) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "No supported cipher found"); + goto out; + } + } + + if(method) { + int free_iv = 0, free_secret = 0, len_decrypted = 0; + int blocksize; + void *abstract = NULL; + + keylen = method->secret_len; + ivlen = method->iv_len; + total_len = keylen + ivlen; + + key = LIBSSH2_CALLOC(session, total_len); + if(key == NULL) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Could not alloc key"); + goto out; + } + + if(strcmp((const char *)kdfname, "bcrypt") == 0 && + passphrase != NULL) { + if((_libssh2_get_string(&kdf_buf, &salt, &salt_len)) || + (_libssh2_get_u32(&kdf_buf, &rounds) != 0) ) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "kdf contains unexpected values"); + LIBSSH2_FREE(session, key); + goto out; + } + + if(_libssh2_bcrypt_pbkdf((const char *)passphrase, + strlen((const char *)passphrase), + salt, salt_len, key, + keylen + ivlen, rounds) < 0) { + ret = _libssh2_error(session, LIBSSH2_ERROR_DECRYPT, + "invalid format"); + LIBSSH2_FREE(session, key); + goto out; + } + } + else { + ret = _libssh2_error(session, LIBSSH2_ERROR_KEYFILE_AUTH_FAILED, + "bcrypted without passphrase"); + LIBSSH2_FREE(session, key); + goto out; + } + + /* Set up decryption */ + blocksize = method->blocksize; + + key_part = LIBSSH2_CALLOC(session, keylen); + if(key_part == NULL) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Could not alloc key part"); + goto out; + } + + iv_part = LIBSSH2_CALLOC(session, ivlen); + if(iv_part == NULL) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Could not alloc iv part"); + goto out; + } + + memcpy(key_part, key, keylen); + memcpy(iv_part, key + keylen, ivlen); + + /* Initialize the decryption */ + 
if(method->init(session, method, iv_part, &free_iv, key_part, + &free_secret, 0, &abstract)) { + ret = LIBSSH2_ERROR_DECRYPT; + goto out; + } + + /* Do the actual decryption */ + if((decrypted.len % blocksize) != 0) { + method->dtor(session, &abstract); + ret = LIBSSH2_ERROR_DECRYPT; + goto out; + } + + while((size_t)len_decrypted <= decrypted.len - blocksize) { + if(method->crypt(session, decrypted.data + len_decrypted, + blocksize, + &abstract)) { + ret = LIBSSH2_ERROR_DECRYPT; + method->dtor(session, &abstract); + goto out; + } + + len_decrypted += blocksize; + } + + /* No padding */ + + method->dtor(session, &abstract); + } + + /* Check random bytes match */ + + if(_libssh2_get_u32(&decrypted, &check1) != 0 || + _libssh2_get_u32(&decrypted, &check2) != 0 || + check1 != check2) { + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Private key unpack failed (correct password?)"); + ret = LIBSSH2_ERROR_KEYFILE_AUTH_FAILED; + goto out; + } + + if(decrypted_buf != NULL) { + /* copy data to out-going buffer */ + struct string_buf *out_buf = _libssh2_string_buf_new(session); + if(!out_buf) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for " + "decrypted struct"); + goto out; + } + + out_buf->data = LIBSSH2_CALLOC(session, decrypted.len); + if(out_buf->data == NULL) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for " + "decrypted struct"); + _libssh2_string_buf_free(session, out_buf); + goto out; + } + memcpy(out_buf->data, decrypted.data, decrypted.len); + out_buf->dataptr = out_buf->data + + (decrypted.dataptr - decrypted.data); + out_buf->len = decrypted.len; + + *decrypted_buf = out_buf; + } + +out: + + /* Clean up */ + if(key) { + _libssh2_explicit_zero(key, total_len); + LIBSSH2_FREE(session, key); + } + if(key_part) { + _libssh2_explicit_zero(key_part, keylen); + LIBSSH2_FREE(session, key_part); + } + if(iv_part) { + _libssh2_explicit_zero(iv_part, ivlen); + LIBSSH2_FREE(session, iv_part); 
+ } + if(f) { + _libssh2_explicit_zero(f, f_len); + LIBSSH2_FREE(session, f); + } + + return ret; +} + +int +_libssh2_openssh_pem_parse(LIBSSH2_SESSION * session, + const unsigned char *passphrase, + FILE * fp, struct string_buf **decrypted_buf) +{ + char line[LINE_SIZE]; + char *b64data = NULL; + unsigned int b64datalen = 0; + int ret = 0; + + /* read file */ + + do { + *line = '\0'; + + if(readline(line, LINE_SIZE, fp)) { + return -1; + } + } + while(strcmp(line, OPENSSH_HEADER_BEGIN) != 0); + + if(readline(line, LINE_SIZE, fp)) { + return -1; + } + + do { + if(*line) { + char *tmp; + size_t linelen; + + linelen = strlen(line); + tmp = LIBSSH2_REALLOC(session, b64data, b64datalen + linelen); + if(!tmp) { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for PEM parsing"); + ret = -1; + goto out; + } + memcpy(tmp + b64datalen, line, linelen); + b64data = tmp; + b64datalen += linelen; + } + + *line = '\0'; + + if(readline(line, LINE_SIZE, fp)) { + ret = -1; + goto out; + } + } while(strcmp(line, OPENSSH_HEADER_END) != 0); + + if(!b64data) { + return -1; + } + + ret = _libssh2_openssh_pem_parse_data(session, + passphrase, + (const char *)b64data, + (size_t)b64datalen, + decrypted_buf); + + if(b64data) { + _libssh2_explicit_zero(b64data, b64datalen); LIBSSH2_FREE(session, b64data); } + +out: + return ret; } +int +_libssh2_openssh_pem_parse_memory(LIBSSH2_SESSION * session, + const unsigned char *passphrase, + const char *filedata, size_t filedata_len, + struct string_buf **decrypted_buf) +{ + char line[LINE_SIZE]; + char *b64data = NULL; + unsigned int b64datalen = 0; + size_t off = 0; + int ret; + + if(filedata == NULL || filedata_len <= 0) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Error parsing PEM: filedata missing"); + + do { + + *line = '\0'; + + if(off >= filedata_len) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Error parsing PEM: offset out of bounds"); + + if(readline_memory(line, LINE_SIZE, filedata, 
filedata_len, &off)) { + return -1; + } + } + while(strcmp(line, OPENSSH_HEADER_BEGIN) != 0); + + *line = '\0'; + + do { + if (*line) { + char *tmp; + size_t linelen; + + linelen = strlen(line); + tmp = LIBSSH2_REALLOC(session, b64data, b64datalen + linelen); + if(!tmp) { + ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for " + "PEM parsing"); + goto out; + } + memcpy(tmp + b64datalen, line, linelen); + b64data = tmp; + b64datalen += linelen; + } + + *line = '\0'; + + if(off >= filedata_len) { + ret = _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Error parsing PEM: offset out of bounds"); + goto out; + } + + if(readline_memory(line, LINE_SIZE, filedata, filedata_len, &off)) { + ret = -1; + goto out; + } + } while(strcmp(line, OPENSSH_HEADER_END) != 0); + + if(!b64data) + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Error parsing PEM: base 64 data missing"); + + ret = _libssh2_openssh_pem_parse_data(session, passphrase, b64data, + b64datalen, decrypted_buf); + +out: + if(b64data) { + _libssh2_explicit_zero(b64data, b64datalen); + LIBSSH2_FREE(session, b64data); + } + return ret; + +} + static int read_asn1_length(const unsigned char *data, unsigned int datalen, unsigned int *len) @@ -233,27 +819,28 @@ read_asn1_length(const unsigned char *data, unsigned int lenlen; int nextpos; - if (datalen < 1) { + if(datalen < 1) { return -1; } *len = data[0]; - if (*len >= 0x80) { + if(*len >= 0x80) { lenlen = *len & 0x7F; *len = data[1]; - if (1 + lenlen > datalen) { + if(1 + lenlen > datalen) { return -1; } - if (lenlen > 1) { + if(lenlen > 1) { *len <<= 8; *len |= data[2]; } - } else { + } + else { lenlen = 0; } nextpos = 1 + lenlen; - if (lenlen > 2 || 1 + lenlen + *len > datalen) { + if(lenlen > 2 || 1 + lenlen + *len > datalen) { return -1; } @@ -266,11 +853,11 @@ _libssh2_pem_decode_sequence(unsigned char **data, unsigned int *datalen) unsigned int len; int lenlen; - if (*datalen < 1) { + if(*datalen < 1) { return -1; } - if 
((*data)[0] != '\x30') { + if((*data)[0] != '\x30') { return -1; } @@ -278,7 +865,7 @@ _libssh2_pem_decode_sequence(unsigned char **data, unsigned int *datalen) (*datalen)--; lenlen = read_asn1_length(*data, *datalen, &len); - if (lenlen < 0 || lenlen + len != *datalen) { + if(lenlen < 0 || lenlen + len != *datalen) { return -1; } @@ -295,11 +882,11 @@ _libssh2_pem_decode_integer(unsigned char **data, unsigned int *datalen, unsigned int len; int lenlen; - if (*datalen < 1) { + if(*datalen < 1) { return -1; } - if ((*data)[0] != '\x02') { + if((*data)[0] != '\x02') { return -1; } @@ -307,7 +894,7 @@ _libssh2_pem_decode_integer(unsigned char **data, unsigned int *datalen, (*datalen)--; lenlen = read_asn1_length(*data, *datalen, &len); - if (lenlen < 0 || lenlen + len > *datalen) { + if(lenlen < 0 || lenlen + len > *datalen) { return -1; } diff --git a/vendor/libssh2/src/publickey.c b/vendor/libssh2/src/publickey.c index bfee0a8420..f26c6327dc 100644 --- a/vendor/libssh2/src/publickey.c +++ b/vendor/libssh2/src/publickey.c @@ -60,7 +60,7 @@ static const LIBSSH2_PUBLICKEY_CODE_LIST publickey_response_codes[] = {LIBSSH2_PUBLICKEY_RESPONSE_STATUS, "status", sizeof("status") - 1}, {LIBSSH2_PUBLICKEY_RESPONSE_VERSION, "version", sizeof("version") - 1}, {LIBSSH2_PUBLICKEY_RESPONSE_PUBLICKEY, "publickey", - sizeof("publickey") - 1} , + sizeof("publickey") - 1}, {0, NULL, 0} }; @@ -78,13 +78,13 @@ static const LIBSSH2_PUBLICKEY_CODE_LIST publickey_response_codes[] = #define LIBSSH2_PUBLICKEY_STATUS_CODE_MAX 8 static const LIBSSH2_PUBLICKEY_CODE_LIST publickey_status_codes[] = { - {LIBSSH2_PUBLICKEY_SUCCESS, "success", sizeof("success") - 1} , + {LIBSSH2_PUBLICKEY_SUCCESS, "success", sizeof("success") - 1}, {LIBSSH2_PUBLICKEY_ACCESS_DENIED, "access denied", sizeof("access denied") - 1}, {LIBSSH2_PUBLICKEY_STORAGE_EXCEEDED, "storage exceeded", - sizeof("storage exceeded") - 1} , + sizeof("storage exceeded") - 1}, {LIBSSH2_PUBLICKEY_VERSION_NOT_SUPPORTED, "version not 
supported", - sizeof("version not supported") - 1} , + sizeof("version not supported") - 1}, {LIBSSH2_PUBLICKEY_KEY_NOT_FOUND, "key not found", sizeof("key not found") - 1}, {LIBSSH2_PUBLICKEY_KEY_NOT_SUPPORTED, "key not supported", @@ -110,13 +110,14 @@ publickey_status_error(const LIBSSH2_PUBLICKEY *pkey, const char *msg; /* GENERAL_FAILURE got remapped between version 1 and 2 */ - if (status == 6 && pkey && pkey->version == 1) { + if(status == 6 && pkey && pkey->version == 1) { status = 7; } - if (status < 0 || status > LIBSSH2_PUBLICKEY_STATUS_CODE_MAX) { + if(status < 0 || status > LIBSSH2_PUBLICKEY_STATUS_CODE_MAX) { msg = "unknown"; - } else { + } + else { msg = publickey_status_codes[status].name; } @@ -139,11 +140,12 @@ publickey_packet_receive(LIBSSH2_PUBLICKEY * pkey, *data = NULL; /* default to nothing returned */ *data_len = 0; - if (pkey->receive_state == libssh2_NB_state_idle) { + if(pkey->receive_state == libssh2_NB_state_idle) { rc = _libssh2_channel_read(channel, 0, (char *) buffer, 4); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc != 4) { + } + else if(rc != 4) { return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, "Invalid response from publickey subsystem"); } @@ -151,7 +153,7 @@ publickey_packet_receive(LIBSSH2_PUBLICKEY * pkey, pkey->receive_packet_len = _libssh2_ntohu32(buffer); pkey->receive_packet = LIBSSH2_ALLOC(session, pkey->receive_packet_len); - if (!pkey->receive_packet) { + if(!pkey->receive_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate publickey response " "buffer"); @@ -160,12 +162,13 @@ publickey_packet_receive(LIBSSH2_PUBLICKEY * pkey, pkey->receive_state = libssh2_NB_state_sent; } - if (pkey->receive_state == libssh2_NB_state_sent) { + if(pkey->receive_state == libssh2_NB_state_sent) { rc = _libssh2_channel_read(channel, 0, (char *) pkey->receive_packet, pkey->receive_packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == 
LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc != (int)pkey->receive_packet_len) { + } + else if(rc != (int)pkey->receive_packet_len) { LIBSSH2_FREE(session, pkey->receive_packet); pkey->receive_packet = NULL; pkey->receive_state = libssh2_NB_state_idle; @@ -195,20 +198,20 @@ publickey_response_id(unsigned char **pdata, size_t data_len) unsigned char *data = *pdata; const LIBSSH2_PUBLICKEY_CODE_LIST *codes = publickey_response_codes; - if (data_len < 4) { + if(data_len < 4) { /* Malformed response */ return -1; } response_len = _libssh2_ntohu32(data); data += 4; data_len -= 4; - if (data_len < response_len) { + if(data_len < response_len) { /* Malformed response */ return -1; } - while (codes->name) { - if ((unsigned long)codes->name_len == response_len && + while(codes->name) { + if((unsigned long)codes->name_len == response_len && strncmp(codes->name, (char *) data, response_len) == 0) { *pdata = data + response_len; return codes->code; @@ -231,28 +234,41 @@ publickey_response_success(LIBSSH2_PUBLICKEY * pkey) size_t data_len; int response; - while (1) { + while(1) { int rc = publickey_packet_receive(pkey, &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_TIMEOUT, "Timeout waiting for response from " "publickey subsystem"); } + if(data_len < 4) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Publickey response too small"); + } + s = data; response = publickey_response_id(&s, data_len); - switch (response) { + switch(response) { case LIBSSH2_PUBLICKEY_RESPONSE_STATUS: /* Error, or processing complete */ { - unsigned long status = _libssh2_ntohu32(s); + unsigned long status = 0; + + if(data_len < 8) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Publickey response too small"); + } + + status = _libssh2_ntohu32(s); LIBSSH2_FREE(session, data); - if (status == 
LIBSSH2_PUBLICKEY_SUCCESS) + if(status == LIBSSH2_PUBLICKEY_SUCCESS) return 0; publickey_status_error(pkey, session, status); @@ -260,7 +276,7 @@ publickey_response_success(LIBSSH2_PUBLICKEY * pkey) } default: LIBSSH2_FREE(session, data); - if (response < 0) { + if(response < 0) { return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, "Invalid publickey subsystem response"); @@ -289,7 +305,7 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) int response; int rc; - if (session->pkeyInit_state == libssh2_NB_state_idle) { + if(session->pkeyInit_state == libssh2_NB_state_idle) { session->pkeyInit_data = NULL; session->pkeyInit_pkey = NULL; session->pkeyInit_channel = NULL; @@ -300,7 +316,7 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_state = libssh2_NB_state_allocated; } - if (session->pkeyInit_state == libssh2_NB_state_allocated) { + if(session->pkeyInit_state == libssh2_NB_state_allocated) { session->pkeyInit_channel = _libssh2_channel_open(session, "session", @@ -308,8 +324,8 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) LIBSSH2_CHANNEL_WINDOW_DEFAULT, LIBSSH2_CHANNEL_PACKET_DEFAULT, NULL, 0); - if (!session->pkeyInit_channel) { - if (libssh2_session_last_errno(session) == LIBSSH2_ERROR_EAGAIN) + if(!session->pkeyInit_channel) { + if(libssh2_session_last_errno(session) == LIBSSH2_ERROR_EAGAIN) /* The error state is already set, so leave it */ return NULL; _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_FAILURE, @@ -320,17 +336,18 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_state = libssh2_NB_state_sent; } - if (session->pkeyInit_state == libssh2_NB_state_sent) { + if(session->pkeyInit_state == libssh2_NB_state_sent) { rc = _libssh2_channel_process_startup(session->pkeyInit_channel, "subsystem", sizeof("subsystem") - 1, "publickey", sizeof("publickey") - 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { 
_libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block starting publickey subsystem"); return NULL; - } else if (rc) { + } + else if(rc) { _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_FAILURE, "Unable to request publickey subsystem"); goto err_exit; @@ -339,11 +356,11 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_state = libssh2_NB_state_sent1; } - if (session->pkeyInit_state == libssh2_NB_state_sent1) { + if(session->pkeyInit_state == libssh2_NB_state_sent1) { unsigned char *s; rc = _libssh2_channel_extended_data(session->pkeyInit_channel, - LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE); - if (rc == LIBSSH2_ERROR_EAGAIN) { + LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE); + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block starting publickey subsystem"); return NULL; @@ -351,7 +368,7 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_pkey = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_PUBLICKEY)); - if (!session->pkeyInit_pkey) { + if(!session->pkeyInit_pkey) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a new publickey structure"); goto err_exit; @@ -377,15 +394,16 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_state = libssh2_NB_state_sent2; } - if (session->pkeyInit_state == libssh2_NB_state_sent2) { + if(session->pkeyInit_state == libssh2_NB_state_sent2) { rc = _libssh2_channel_write(session->pkeyInit_channel, 0, session->pkeyInit_buffer, 19 - session->pkeyInit_buffer_sent); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending publickey version packet"); return NULL; - } else if (rc < 0) { + } + else if(rc < 0) { _libssh2_error(session, rc, "Unable to send publickey version packet"); goto err_exit; @@ -400,18 +418,19 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) session->pkeyInit_state = 
libssh2_NB_state_sent3; } - if (session->pkeyInit_state == libssh2_NB_state_sent3) { - while (1) { + if(session->pkeyInit_state == libssh2_NB_state_sent3) { + while(1) { unsigned char *s; rc = publickey_packet_receive(session->pkeyInit_pkey, &session->pkeyInit_data, &session->pkeyInit_data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for response from " "publickey subsystem"); return NULL; - } else if (rc) { + } + else if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_TIMEOUT, "Timeout waiting for response from " "publickey subsystem"); @@ -419,31 +438,62 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) } s = session->pkeyInit_data; - if ((response = + if((response = publickey_response_id(&s, session->pkeyInit_data_len)) < 0) { _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, "Invalid publickey subsystem response code"); goto err_exit; } - switch (response) { + if(session->pkeyInit_data_len < 4) { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Public key init data too small"); + goto err_exit; + } + + switch(response) { case LIBSSH2_PUBLICKEY_RESPONSE_STATUS: /* Error */ { unsigned long status, descr_len, lang_len; - status = _libssh2_ntohu32(s); - s += 4; - descr_len = _libssh2_ntohu32(s); - s += 4; - /* description starts here */ - s += descr_len; - lang_len = _libssh2_ntohu32(s); - s += 4; - /* lang starts here */ - s += lang_len; - - if (s > + if(session->pkeyInit_data_len >= 8) { + status = _libssh2_ntohu32(s); + s += 4; + descr_len = _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Public key init data too small"); + goto err_exit; + } + + if(s + descr_len + 4 <= + session->pkeyInit_data + session->pkeyInit_data_len) { + /* description starts here */ + s += descr_len; + lang_len = _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, 
LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Public key init data too small"); + goto err_exit; + } + + if(s + lang_len <= + session->pkeyInit_data + session->pkeyInit_data_len) { + /* lang starts here */ + s += lang_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Public key init data too small"); + goto err_exit; + } + + if(s > session->pkeyInit_data + session->pkeyInit_data_len) { _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, @@ -459,10 +509,11 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) case LIBSSH2_PUBLICKEY_RESPONSE_VERSION: /* What we want */ session->pkeyInit_pkey->version = _libssh2_ntohu32(s); - if (session->pkeyInit_pkey->version > + if(session->pkeyInit_pkey->version > LIBSSH2_PUBLICKEY_VERSION) { _libssh2_debug(session, LIBSSH2_TRACE_PUBLICKEY, - "Truncate remote publickey version from %lu", + "Truncate remote publickey version " + "from %lu", session->pkeyInit_pkey->version); session->pkeyInit_pkey->version = LIBSSH2_PUBLICKEY_VERSION; @@ -489,19 +540,19 @@ static LIBSSH2_PUBLICKEY *publickey_init(LIBSSH2_SESSION *session) /* Never reached except by direct goto */ err_exit: session->pkeyInit_state = libssh2_NB_state_sent4; - if (session->pkeyInit_channel) { + if(session->pkeyInit_channel) { rc = _libssh2_channel_close(session->pkeyInit_channel); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block closing channel"); return NULL; } } - if (session->pkeyInit_pkey) { + if(session->pkeyInit_pkey) { LIBSSH2_FREE(session, session->pkeyInit_pkey); session->pkeyInit_pkey = NULL; } - if (session->pkeyInit_data) { + if(session->pkeyInit_data) { LIBSSH2_FREE(session, session->pkeyInit_data); session->pkeyInit_data = NULL; } @@ -553,16 +604,16 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, channel = pkey->channel; session = channel->session; - if (pkey->add_state == libssh2_NB_state_idle) { + 
if(pkey->add_state == libssh2_NB_state_idle) { pkey->add_packet = NULL; _libssh2_debug(session, LIBSSH2_TRACE_PUBLICKEY, "Adding %s publickey", name); - if (pkey->version == 1) { + if(pkey->version == 1) { for(i = 0; i < num_attrs; i++) { /* Search for a comment attribute */ - if (attrs[i].name_len == (sizeof("comment") - 1) && + if(attrs[i].name_len == (sizeof("comment") - 1) && strncmp(attrs[i].name, "comment", sizeof("comment") - 1) == 0) { comment = (unsigned char *) attrs[i].value; @@ -571,7 +622,8 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, } } packet_len += 4 + comment_len; - } else { + } + else { packet_len += 5; /* overwrite(1) + attribute_count(4) */ for(i = 0; i < num_attrs; i++) { packet_len += 9 + attrs[i].name_len + attrs[i].value_len; @@ -580,7 +632,7 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, } pkey->add_packet = LIBSSH2_ALLOC(session, packet_len); - if (!pkey->add_packet) { + if(!pkey->add_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "publickey \"add\" packet"); @@ -593,10 +645,10 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, pkey->add_s += 4; memcpy(pkey->add_s, "add", sizeof("add") - 1); pkey->add_s += sizeof("add") - 1; - if (pkey->version == 1) { + if(pkey->version == 1) { _libssh2_htonu32(pkey->add_s, comment_len); pkey->add_s += 4; - if (comment) { + if(comment) { memcpy(pkey->add_s, comment, comment_len); pkey->add_s += comment_len; } @@ -609,7 +661,8 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, pkey->add_s += 4; memcpy(pkey->add_s, blob, blob_len); pkey->add_s += blob_len; - } else { + } + else { /* Version == 2 */ _libssh2_htonu32(pkey->add_s, name_len); @@ -644,12 +697,13 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, pkey->add_state = libssh2_NB_state_created; } - if (pkey->add_state == libssh2_NB_state_created) { + 
if(pkey->add_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, pkey->add_packet, (pkey->add_s - pkey->add_packet)); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((pkey->add_s - pkey->add_packet) != rc) { + } + else if((pkey->add_s - pkey->add_packet) != rc) { LIBSSH2_FREE(session, pkey->add_packet); pkey->add_packet = NULL; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, @@ -662,7 +716,7 @@ libssh2_publickey_add_ex(LIBSSH2_PUBLICKEY *pkey, const unsigned char *name, } rc = publickey_response_success(pkey); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } @@ -693,11 +747,11 @@ libssh2_publickey_remove_ex(LIBSSH2_PUBLICKEY * pkey, channel = pkey->channel; session = channel->session; - if (pkey->remove_state == libssh2_NB_state_idle) { + if(pkey->remove_state == libssh2_NB_state_idle) { pkey->remove_packet = NULL; pkey->remove_packet = LIBSSH2_ALLOC(session, packet_len); - if (!pkey->remove_packet) { + if(!pkey->remove_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "publickey \"remove\" packet"); @@ -727,12 +781,13 @@ libssh2_publickey_remove_ex(LIBSSH2_PUBLICKEY * pkey, pkey->remove_state = libssh2_NB_state_created; } - if (pkey->remove_state == libssh2_NB_state_created) { + if(pkey->remove_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, pkey->remove_packet, (pkey->remove_s - pkey->remove_packet)); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((pkey->remove_s - pkey->remove_packet) != rc) { + } + else if((pkey->remove_s - pkey->remove_packet) != rc) { LIBSSH2_FREE(session, pkey->remove_packet); pkey->remove_packet = NULL; pkey->remove_state = libssh2_NB_state_idle; @@ -746,7 +801,7 @@ libssh2_publickey_remove_ex(LIBSSH2_PUBLICKEY * pkey, } rc = publickey_response_success(pkey); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == 
LIBSSH2_ERROR_EAGAIN) { return rc; } @@ -776,7 +831,7 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, channel = pkey->channel; session = channel->session; - if (pkey->listFetch_state == libssh2_NB_state_idle) { + if(pkey->listFetch_state == libssh2_NB_state_idle) { pkey->listFetch_data = NULL; pkey->listFetch_s = pkey->listFetch_buffer; @@ -793,14 +848,15 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, pkey->listFetch_state = libssh2_NB_state_created; } - if (pkey->listFetch_state == libssh2_NB_state_created) { + if(pkey->listFetch_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, pkey->listFetch_buffer, (pkey->listFetch_s - pkey->listFetch_buffer)); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((pkey->listFetch_s - pkey->listFetch_buffer) != rc) { + } + else if((pkey->listFetch_s - pkey->listFetch_buffer) != rc) { pkey->listFetch_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send publickey list packet"); @@ -809,12 +865,13 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, pkey->listFetch_state = libssh2_NB_state_sent; } - while (1) { + while(1) { rc = publickey_packet_receive(pkey, &pkey->listFetch_data, &pkey->listFetch_data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_TIMEOUT, "Timeout waiting for response from " "publickey subsystem"); @@ -822,7 +879,7 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, } pkey->listFetch_s = pkey->listFetch_data; - if ((response = + if((response = publickey_response_id(&pkey->listFetch_s, pkey->listFetch_data_len)) < 0) { _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, @@ -830,31 +887,57 @@ 
libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, goto err_exit; } - switch (response) { + switch(response) { case LIBSSH2_PUBLICKEY_RESPONSE_STATUS: /* Error, or processing complete */ { unsigned long status, descr_len, lang_len; - status = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - descr_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - /* description starts at pkey->listFetch_s */ - pkey->listFetch_s += descr_len; - lang_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - /* lang starts at pkey->listFetch_s */ - pkey->listFetch_s += lang_len; - - if (pkey->listFetch_s > + if(pkey->listFetch_s + 8 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + status = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + descr_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + descr_len + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + /* description starts at pkey->listFetch_s */ + pkey->listFetch_s += descr_len; + lang_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + lang_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + /* lang starts at pkey->listFetch_s */ + pkey->listFetch_s += lang_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s > pkey->listFetch_data + pkey->listFetch_data_len) { _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_PROTOCOL, "Malformed publickey subsystem packet"); goto err_exit; } - if (status == LIBSSH2_PUBLICKEY_SUCCESS) { + if(status == LIBSSH2_PUBLICKEY_SUCCESS) { 
LIBSSH2_FREE(session, pkey->listFetch_data); pkey->listFetch_data = NULL; *pkey_list = list; @@ -868,7 +951,7 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, } case LIBSSH2_PUBLICKEY_RESPONSE_PUBLICKEY: /* What we want */ - if (keys >= max_keys) { + if(keys >= max_keys) { libssh2_publickey_list *newlist; /* Grow the key list if necessary */ max_keys += 8; @@ -876,7 +959,7 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, LIBSSH2_REALLOC(session, list, (max_keys + 1) * sizeof(libssh2_publickey_list)); - if (!newlist) { + if(!newlist) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "publickey list"); @@ -884,17 +967,26 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, } list = newlist; } - if (pkey->version == 1) { + if(pkey->version == 1) { unsigned long comment_len; - comment_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - if (comment_len) { + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + comment_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(comment_len) { list[keys].num_attrs = 1; list[keys].attrs = LIBSSH2_ALLOC(session, sizeof(libssh2_publickey_attribute)); - if (!list[keys].attrs) { + if(!list[keys].attrs) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "publickey attributes"); @@ -907,57 +999,184 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, list[keys].attrs[0].mandatory = 0; pkey->listFetch_s += comment_len; - } else { + } + else { list[keys].num_attrs = 0; list[keys].attrs = NULL; } - list[keys].name_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].name = pkey->listFetch_s; - pkey->listFetch_s += list[keys].name_len; - 
list[keys].blob_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].blob = pkey->listFetch_s; - pkey->listFetch_s += list[keys].blob_len; - } else { + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].name_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + list[keys].name_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].name = pkey->listFetch_s; + pkey->listFetch_s += list[keys].name_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].blob_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + list[keys].blob_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].blob = pkey->listFetch_s; + pkey->listFetch_s += list[keys].blob_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + } + else { /* Version == 2 */ - list[keys].name_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].name = pkey->listFetch_s; - pkey->listFetch_s += list[keys].name_len; - list[keys].blob_len = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].blob = pkey->listFetch_s; - pkey->listFetch_s += list[keys].blob_len; - list[keys].num_attrs = _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - if (list[keys].num_attrs) { + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].name_len = 
_libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + list[keys].name_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].name = pkey->listFetch_s; + pkey->listFetch_s += list[keys].name_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].blob_len = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + list[keys].blob_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].blob = pkey->listFetch_s; + pkey->listFetch_s += list[keys].blob_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].num_attrs = _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(list[keys].num_attrs) { list[keys].attrs = LIBSSH2_ALLOC(session, list[keys].num_attrs * sizeof(libssh2_publickey_attribute)); - if (!list[keys].attrs) { + if(!list[keys].attrs) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "publickey attributes"); goto err_exit; } for(i = 0; i < list[keys].num_attrs; i++) { - list[keys].attrs[i].name_len = - _libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].attrs[i].name = (char *) pkey->listFetch_s; - pkey->listFetch_s += list[keys].attrs[i].name_len; - list[keys].attrs[i].value_len = - 
_libssh2_ntohu32(pkey->listFetch_s); - pkey->listFetch_s += 4; - list[keys].attrs[i].value = (char *) pkey->listFetch_s; - pkey->listFetch_s += list[keys].attrs[i].value_len; + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].attrs[i].name_len = + _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, + LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + list[keys].attrs[i].name_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].attrs[i].name = + (char *) pkey->listFetch_s; + pkey->listFetch_s += list[keys].attrs[i].name_len; + } + else { + _libssh2_error(session, + LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + 4 <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].attrs[i].value_len = + _libssh2_ntohu32(pkey->listFetch_s); + pkey->listFetch_s += 4; + } + else { + _libssh2_error(session, + LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } + + if(pkey->listFetch_s + + list[keys].attrs[i].value_len <= + pkey->listFetch_data + pkey->listFetch_data_len) { + list[keys].attrs[i].value = + (char *) pkey->listFetch_s; + pkey->listFetch_s += list[keys].attrs[i].value_len; + } + else { + _libssh2_error(session, + LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "ListFetch data too short"); + goto err_exit; + } /* actually an ignored value */ list[keys].attrs[i].mandatory = 0; } - } else { + } + else { list[keys].attrs = NULL; } } @@ -979,11 +1198,11 @@ libssh2_publickey_list_fetch(LIBSSH2_PUBLICKEY * pkey, unsigned long *num_keys, /* Only reached via explicit goto */ err_exit: - if (pkey->listFetch_data) { + if(pkey->listFetch_data) { LIBSSH2_FREE(session, pkey->listFetch_data); pkey->listFetch_data = NULL; } - if (list) { + if(list) { libssh2_publickey_list_free(pkey, list); } pkey->listFetch_state = 
libssh2_NB_state_idle; @@ -1005,8 +1224,8 @@ libssh2_publickey_list_free(LIBSSH2_PUBLICKEY * pkey, session = pkey->channel->session; - while (p->packet) { - if (p->attrs) { + while(p->packet) { + if(p->attrs) { LIBSSH2_FREE(session, p->attrs); } LIBSSH2_FREE(session, p->packet); @@ -1033,25 +1252,25 @@ libssh2_publickey_shutdown(LIBSSH2_PUBLICKEY *pkey) /* * Make sure all memory used in the state variables are free */ - if (pkey->receive_packet) { + if(pkey->receive_packet) { LIBSSH2_FREE(session, pkey->receive_packet); pkey->receive_packet = NULL; } - if (pkey->add_packet) { + if(pkey->add_packet) { LIBSSH2_FREE(session, pkey->add_packet); pkey->add_packet = NULL; } - if (pkey->remove_packet) { + if(pkey->remove_packet) { LIBSSH2_FREE(session, pkey->remove_packet); pkey->remove_packet = NULL; } - if (pkey->listFetch_data) { + if(pkey->listFetch_data) { LIBSSH2_FREE(session, pkey->listFetch_data); pkey->listFetch_data = NULL; } rc = _libssh2_channel_free(pkey->channel); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; LIBSSH2_FREE(session, pkey); diff --git a/vendor/libssh2/src/scp.c b/vendor/libssh2/src/scp.c index 22778dd38a..8cb3d65c3b 100644 --- a/vendor/libssh2/src/scp.c +++ b/vendor/libssh2/src/scp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2009-2010 by Daniel Stenberg +/* Copyright (c) 2009-2019 by Daniel Stenberg * Copyright (c) 2004-2008, Sara Golemon * All rights reserved. * @@ -65,13 +65,13 @@ current argument word, add the apostrophe in quotation marks "", and open a new argument word instead (_ indicate the input string characters): - _____ _ _ + _____ _ _ 'doesn' "'" 't' Sequences of apostrophes are combined in one pair of quotation marks: a'''b becomes - _ ___ _ + _ ___ _ 'a'"'''"'b' o If the string contains an exclamation mark (!), the C-Shell @@ -84,7 +84,7 @@ a!b become - _ _ _ + _ _ _ 'a'\!'b' The result buffer must be large enough for the expanded result. 
A @@ -141,9 +141,9 @@ shell_quotearg(const char *path, unsigned char *buf, endp = &buf[bufsize]; src = path; dst = buf; - while (*src && dst < endp - 1) { + while(*src && dst < endp - 1) { - switch (*src) { + switch(*src) { /* * Special handling for apostrophe. * An apostrophe is always written in quotation marks, e.g. @@ -151,16 +151,16 @@ shell_quotearg(const char *path, unsigned char *buf, */ case '\'': - switch (state) { + switch(state) { case UQSTRING: /* Unquoted string */ - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = '"'; break; case QSTRING: /* Continue quoted string */ break; case SQSTRING: /* Close single quoted string */ - if (dst+2 >= endp) + if(dst + 2 >= endp) return 0; *dst++ = '\''; *dst++ = '"'; @@ -179,20 +179,20 @@ shell_quotearg(const char *path, unsigned char *buf, */ case '!': - switch (state) { + switch(state) { case UQSTRING: - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = '\\'; break; case QSTRING: - if (dst+2 >= endp) + if(dst + 2 >= endp) return 0; *dst++ = '"'; /* Closing quotation mark */ *dst++ = '\\'; break; case SQSTRING: /* Close single quoted string */ - if (dst+2 >= endp) + if(dst + 2 >= endp) return 0; *dst++ = '\''; *dst++ = '\\'; @@ -208,14 +208,14 @@ shell_quotearg(const char *path, unsigned char *buf, */ default: - switch (state) { + switch(state) { case UQSTRING: - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = '\''; break; case QSTRING: - if (dst+2 >= endp) + if(dst + 2 >= endp) return 0; *dst++ = '"'; /* Closing quotation mark */ *dst++ = '\''; @@ -229,21 +229,21 @@ shell_quotearg(const char *path, unsigned char *buf, break; } - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = *src++; } - switch (state) { + switch(state) { case UQSTRING: break; case QSTRING: /* Close quoted string */ - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = '"'; break; case SQSTRING: /* Close single quoted string */ - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst++ = '\''; 
break; @@ -251,7 +251,7 @@ shell_quotearg(const char *path, unsigned char *buf, break; } - if (dst+1 >= endp) + if(dst + 1 >= endp) return 0; *dst = '\0'; @@ -275,7 +275,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) int tmp_err_code; const char *tmp_err_msg; - if (session->scpRecv_state == libssh2_NB_state_idle) { + if(session->scpRecv_state == libssh2_NB_state_idle) { session->scpRecv_mode = 0; session->scpRecv_size = 0; session->scpRecv_mtime = 0; @@ -287,7 +287,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_command = LIBSSH2_ALLOC(session, session->scpRecv_command_len); - if (!session->scpRecv_command) { + if(!session->scpRecv_command) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a command buffer for " "SCP session"); @@ -303,8 +303,8 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) &session->scpRecv_command[cmd_len], session->scpRecv_command_len - cmd_len); - session->scpRecv_command[cmd_len] = '\0'; - session->scpRecv_command_len = cmd_len + 1; + /* the command to exec should _not_ be NUL-terminated */ + session->scpRecv_command_len = cmd_len; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "Opening channel for SCP receive"); @@ -312,7 +312,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_created; } - if (session->scpRecv_state == libssh2_NB_state_created) { + if(session->scpRecv_state == libssh2_NB_state_created) { /* Allocate a channel */ session->scpRecv_channel = _libssh2_channel_open(session, "session", @@ -320,8 +320,8 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) LIBSSH2_CHANNEL_WINDOW_DEFAULT, LIBSSH2_CHANNEL_PACKET_DEFAULT, NULL, 0); - if (!session->scpRecv_channel) { - if (libssh2_session_last_errno(session) != + if(!session->scpRecv_channel) { + if(libssh2_session_last_errno(session) != 
LIBSSH2_ERROR_EAGAIN) { LIBSSH2_FREE(session, session->scpRecv_command); session->scpRecv_command = NULL; @@ -337,17 +337,18 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent; } - if (session->scpRecv_state == libssh2_NB_state_sent) { + if(session->scpRecv_state == libssh2_NB_state_sent) { /* Request SCP for the desired file */ rc = _libssh2_channel_process_startup(session->scpRecv_channel, "exec", sizeof("exec") - 1, - (char *) session->scpRecv_command, + (char *)session->scpRecv_command, session->scpRecv_command_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting SCP startup"); return NULL; - } else if (rc) { + } + else if(rc) { LIBSSH2_FREE(session, session->scpRecv_command); session->scpRecv_command = NULL; goto scp_recv_error; @@ -362,14 +363,15 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent1; } - if (session->scpRecv_state == libssh2_NB_state_sent1) { + if(session->scpRecv_state == libssh2_NB_state_sent1) { rc = _libssh2_channel_write(session->scpRecv_channel, 0, session->scpRecv_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending initial wakeup"); return NULL; - } else if (rc != 1) { + } + else if(rc != 1) { goto scp_recv_error; } @@ -379,23 +381,23 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent2; } - if ((session->scpRecv_state == libssh2_NB_state_sent2) + if((session->scpRecv_state == libssh2_NB_state_sent2) || (session->scpRecv_state == libssh2_NB_state_sent3)) { - while (sb && (session->scpRecv_response_len < + while(sb && (session->scpRecv_response_len < LIBSSH2_SCP_RESPONSE_BUFLEN)) { unsigned char *s, *p; - if 
(session->scpRecv_state == libssh2_NB_state_sent2) { + if(session->scpRecv_state == libssh2_NB_state_sent2) { rc = _libssh2_channel_read(session->scpRecv_channel, 0, (char *) session-> scpRecv_response + session->scpRecv_response_len, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for SCP response"); return NULL; } - else if (rc < 0) { + else if(rc < 0) { /* error, give up */ _libssh2_error(session, rc, "Failed reading SCP response"); goto scp_recv_error; @@ -405,7 +407,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_response_len++; - if (session->scpRecv_response[0] != 'T') { + if(session->scpRecv_response[0] != 'T') { size_t err_len; char *err_msg; @@ -419,7 +421,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) _libssh2_channel_packet_data_len(session-> scpRecv_channel, 0); err_msg = LIBSSH2_ALLOC(session, err_len + 1); - if (!err_msg) { + if(!err_msg) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Failed to get memory "); goto scp_recv_error; @@ -431,7 +433,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) /* If it failed for any reason, we ignore it anyway. 
*/ /* zero terminate the error */ - err_msg[err_len]=0; + err_msg[err_len] = 0; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "got %02x %s", session->scpRecv_response[0], @@ -444,7 +446,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) goto scp_recv_error; } - if ((session->scpRecv_response_len > 1) && + if((session->scpRecv_response_len > 1) && ((session-> scpRecv_response[session->scpRecv_response_len - 1] < '0') @@ -465,15 +467,16 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) goto scp_recv_error; } - if ((session->scpRecv_response_len < 9) + if((session->scpRecv_response_len < 9) || (session-> scpRecv_response[session->scpRecv_response_len - 1] != '\n')) { - if (session->scpRecv_response_len == + if(session->scpRecv_response_len == LIBSSH2_SCP_RESPONSE_BUFLEN) { /* You had your chance */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Unterminated response from SCP server"); + "Unterminated response from " + "SCP server"); goto scp_recv_error; } /* Way too short to be an SCP response, or not done yet, @@ -483,7 +486,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) /* We're guaranteed not to go under response_len == 0 by the logic above */ - while ((session-> + while((session-> scpRecv_response[session->scpRecv_response_len - 1] == '\r') || (session-> @@ -493,18 +496,18 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_response[session->scpRecv_response_len] = '\0'; - if (session->scpRecv_response_len < 8) { + if(session->scpRecv_response_len < 8) { /* EOL came too soon */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid response from SCP server, " - "too short" ); + "too short"); goto scp_recv_error; } s = session->scpRecv_response + 1; p = (unsigned char *) strchr((char *) s, ' '); - if (!p || ((p - s) <= 0)) { + if(!p || ((p - s) <= 0)) { /* No spaces or space in the wrong spot */ 
_libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid response from SCP server, " @@ -517,20 +520,22 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_mtime = strtol((char *) s, NULL, 10); s = (unsigned char *) strchr((char *) p, ' '); - if (!s || ((s - p) <= 0)) { + if(!s || ((s - p) <= 0)) { /* No spaces or space in the wrong spot */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, malformed mtime.usec"); + "Invalid response from SCP server, " + "malformed mtime.usec"); goto scp_recv_error; } /* Ignore mtime.usec */ s++; p = (unsigned char *) strchr((char *) s, ' '); - if (!p || ((p - s) <= 0)) { + if(!p || ((p - s) <= 0)) { /* No spaces or space in the wrong spot */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, too short or malformed"); + "Invalid response from SCP server, " + "too short or malformed"); goto scp_recv_error; } @@ -544,14 +549,15 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent3; } - if (session->scpRecv_state == libssh2_NB_state_sent3) { + if(session->scpRecv_state == libssh2_NB_state_sent3) { rc = _libssh2_channel_write(session->scpRecv_channel, 0, session->scpRecv_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting to send SCP ACK"); return NULL; - } else if (rc != 1) { + } + else if(rc != 1) { goto scp_recv_error; } @@ -568,28 +574,28 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent4; } - if (session->scpRecv_state == libssh2_NB_state_sent4) { + if(session->scpRecv_state == libssh2_NB_state_sent4) { session->scpRecv_response_len = 0; session->scpRecv_state = libssh2_NB_state_sent5; } - if ((session->scpRecv_state == libssh2_NB_state_sent5) + 
if((session->scpRecv_state == libssh2_NB_state_sent5) || (session->scpRecv_state == libssh2_NB_state_sent6)) { - while (session->scpRecv_response_len < LIBSSH2_SCP_RESPONSE_BUFLEN) { + while(session->scpRecv_response_len < LIBSSH2_SCP_RESPONSE_BUFLEN) { char *s, *p, *e = NULL; - if (session->scpRecv_state == libssh2_NB_state_sent5) { + if(session->scpRecv_state == libssh2_NB_state_sent5) { rc = _libssh2_channel_read(session->scpRecv_channel, 0, (char *) session-> scpRecv_response + session->scpRecv_response_len, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for SCP response"); return NULL; } - else if (rc < 0) { + else if(rc < 0) { /* error, bail out*/ _libssh2_error(session, rc, "Failed reading SCP response"); goto scp_recv_error; @@ -599,13 +605,13 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_response_len++; - if (session->scpRecv_response[0] != 'C') { + if(session->scpRecv_response[0] != 'C') { _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid response from SCP server"); goto scp_recv_error; } - if ((session->scpRecv_response_len > 1) && + if((session->scpRecv_response_len > 1) && (session-> scpRecv_response[session->scpRecv_response_len - 1] != '\r') @@ -621,15 +627,16 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) goto scp_recv_error; } - if ((session->scpRecv_response_len < 7) + if((session->scpRecv_response_len < 7) || (session-> scpRecv_response[session->scpRecv_response_len - 1] != '\n')) { - if (session->scpRecv_response_len == + if(session->scpRecv_response_len == LIBSSH2_SCP_RESPONSE_BUFLEN) { /* You had your chance */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Unterminated response from SCP server"); + "Unterminated response " + "from SCP server"); goto scp_recv_error; } /* Way too short to be an SCP response, or not done yet, @@ -639,7 +646,7 @@ 
scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) /* We're guaranteed not to go under response_len == 0 by the logic above */ - while ((session-> + while((session-> scpRecv_response[session->scpRecv_response_len - 1] == '\r') || (session-> @@ -650,20 +657,22 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_response[session->scpRecv_response_len] = '\0'; - if (session->scpRecv_response_len < 6) { + if(session->scpRecv_response_len < 6) { /* EOL came too soon */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, too short"); + "Invalid response from SCP server, " + "too short"); goto scp_recv_error; } s = (char *) session->scpRecv_response + 1; p = strchr(s, ' '); - if (!p || ((p - s) <= 0)) { + if(!p || ((p - s) <= 0)) { /* No spaces or space in the wrong spot */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, malformed mode"); + "Invalid response from SCP server, " + "malformed mode"); goto scp_recv_error; } @@ -671,26 +680,29 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) /* Make sure we don't get fooled by leftover values */ session->scpRecv_mode = strtol(s, &e, 8); - if (e && *e) { + if(e && *e) { _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, invalid mode"); + "Invalid response from SCP server, " + "invalid mode"); goto scp_recv_error; } s = strchr(p, ' '); - if (!s || ((s - p) <= 0)) { + if(!s || ((s - p) <= 0)) { /* No spaces or space in the wrong spot */ _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, too short or malformed"); + "Invalid response from SCP server, " + "too short or malformed"); goto scp_recv_error; } *s = '\0'; /* Make sure we don't get fooled by leftover values */ session->scpRecv_size = scpsize_strtol(p, &e, 10); - if (e && *e) { + if(e && *e) { 
_libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, - "Invalid response from SCP server, invalid size"); + "Invalid response from SCP server, " + "invalid size"); goto scp_recv_error; } @@ -700,14 +712,15 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent6; } - if (session->scpRecv_state == libssh2_NB_state_sent6) { + if(session->scpRecv_state == libssh2_NB_state_sent6) { rc = _libssh2_channel_write(session->scpRecv_channel, 0, session->scpRecv_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending SCP ACK"); return NULL; - } else if (rc != 1) { + } + else if(rc != 1) { goto scp_recv_error; } _libssh2_debug(session, LIBSSH2_TRACE_SCP, @@ -723,7 +736,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) session->scpRecv_state = libssh2_NB_state_sent7; } - if (sb) { + if(sb) { memset(sb, 0, sizeof(libssh2_struct_stat)); sb->st_mtime = session->scpRecv_mtime; @@ -747,7 +760,7 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) scp_recv_error: tmp_err_code = session->err_code; tmp_err_msg = session->err_msg; - while (libssh2_channel_free(session->scpRecv_channel) == + while(libssh2_channel_free(session->scpRecv_channel) == LIBSSH2_ERROR_EAGAIN); session->err_code = tmp_err_code; session->err_msg = tmp_err_msg; @@ -761,9 +774,9 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) * * DEPRECATED * - * Open a channel and request a remote file via SCP. This receives files larger - * than 2 GB, but is unable to report the proper size on platforms where the - * st_size member of struct stat is limited to 2 GB (e.g. windows). + * Open a channel and request a remote file via SCP. 
This receives files + * larger than 2 GB, but is unable to report the proper size on platforms + * where the st_size member of struct stat is limited to 2 GB (e.g. windows). * */ LIBSSH2_API LIBSSH2_CHANNEL * @@ -771,15 +784,17 @@ libssh2_scp_recv(LIBSSH2_SESSION *session, const char *path, struct stat * sb) { LIBSSH2_CHANNEL *ptr; - /* scp_recv uses libssh2_struct_stat, so pass one if the caller gave us a struct to populate... */ + /* scp_recv uses libssh2_struct_stat, so pass one if the caller gave us a + struct to populate... */ libssh2_struct_stat sb_intl; libssh2_struct_stat *sb_ptr; + memset(&sb_intl, 0, sizeof(sb_intl)); sb_ptr = sb ? &sb_intl : NULL; BLOCK_ADJUST_ERRNO(ptr, session, scp_recv(session, path, sb_ptr)); /* ...and populate the caller's with as much info as fits. */ - if (sb) { + if(sb) { memset(sb, 0, sizeof(struct stat)); sb->st_mtime = sb_intl.st_mtime; @@ -799,7 +814,8 @@ libssh2_scp_recv(LIBSSH2_SESSION *session, const char *path, struct stat * sb) * */ LIBSSH2_API LIBSSH2_CHANNEL * -libssh2_scp_recv2(LIBSSH2_SESSION *session, const char *path, libssh2_struct_stat * sb) +libssh2_scp_recv2(LIBSSH2_SESSION *session, const char *path, + libssh2_struct_stat *sb) { LIBSSH2_CHANNEL *ptr; BLOCK_ADJUST_ERRNO(ptr, session, scp_recv(session, path, sb)); @@ -821,7 +837,7 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, int tmp_err_code; const char *tmp_err_msg; - if (session->scpSend_state == libssh2_NB_state_idle) { + if(session->scpSend_state == libssh2_NB_state_idle) { session->scpSend_command_len = _libssh2_shell_quotedsize(path) + sizeof("scp -t ") + ((mtime || atime)?1:0); @@ -829,7 +845,7 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_command = LIBSSH2_ALLOC(session, session->scpSend_command_len); - if (!session->scpSend_command) { + if(!session->scpSend_command) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a command buffer for " "SCP session"); @@ -845,8 +861,8 @@ 
scp_send(LIBSSH2_SESSION * session, const char *path, int mode, &session->scpSend_command[cmd_len], session->scpSend_command_len - cmd_len); - session->scpSend_command[cmd_len] = '\0'; - session->scpSend_command_len = cmd_len + 1; + /* the command to exec should _not_ be NUL-terminated */ + session->scpSend_command_len = cmd_len; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "Opening channel for SCP send"); @@ -855,13 +871,13 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_created; } - if (session->scpSend_state == libssh2_NB_state_created) { + if(session->scpSend_state == libssh2_NB_state_created) { session->scpSend_channel = _libssh2_channel_open(session, "session", sizeof("session") - 1, LIBSSH2_CHANNEL_WINDOW_DEFAULT, LIBSSH2_CHANNEL_PACKET_DEFAULT, NULL, 0); - if (!session->scpSend_channel) { - if (libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { + if(!session->scpSend_channel) { + if(libssh2_session_last_errno(session) != LIBSSH2_ERROR_EAGAIN) { /* previous call set libssh2_session_last_error(), pass it through */ LIBSSH2_FREE(session, session->scpSend_command); @@ -878,18 +894,18 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_sent; } - if (session->scpSend_state == libssh2_NB_state_sent) { + if(session->scpSend_state == libssh2_NB_state_sent) { /* Request SCP for the desired file */ rc = _libssh2_channel_process_startup(session->scpSend_channel, "exec", sizeof("exec") - 1, - (char *) session->scpSend_command, + (char *)session->scpSend_command, session->scpSend_command_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting SCP startup"); return NULL; } - else if (rc) { + else if(rc) { /* previous call set libssh2_session_last_error(), pass it through */ LIBSSH2_FREE(session, session->scpSend_command); @@ -904,28 +920,28 @@ 
scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_sent1; } - if (session->scpSend_state == libssh2_NB_state_sent1) { + if(session->scpSend_state == libssh2_NB_state_sent1) { /* Wait for ACK */ rc = _libssh2_channel_read(session->scpSend_channel, 0, (char *) session->scpSend_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for response from remote"); return NULL; } - else if (rc < 0) { + else if(rc < 0) { _libssh2_error(session, rc, "SCP failure"); goto scp_send_error; } else if(!rc) /* remain in the same state */ goto scp_send_empty_channel; - else if (session->scpSend_response[0] != 0) { + else if(session->scpSend_response[0] != 0) { _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid ACK response from remote"); goto scp_send_error; } - if (mtime || atime) { + if(mtime || atime) { /* Send mtime and atime to be used for file */ session->scpSend_response_len = snprintf((char *) session->scpSend_response, @@ -939,16 +955,17 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, } /* Send mtime and atime to be used for file */ - if (mtime || atime) { - if (session->scpSend_state == libssh2_NB_state_sent2) { + if(mtime || atime) { + if(session->scpSend_state == libssh2_NB_state_sent2) { rc = _libssh2_channel_write(session->scpSend_channel, 0, session->scpSend_response, session->scpSend_response_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending time data for SCP file"); return NULL; - } else if (rc != (int)session->scpSend_response_len) { + } + else if(rc != (int)session->scpSend_response_len) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send time data for SCP file"); goto scp_send_error; @@ -957,23 +974,23 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, 
session->scpSend_state = libssh2_NB_state_sent3; } - if (session->scpSend_state == libssh2_NB_state_sent3) { + if(session->scpSend_state == libssh2_NB_state_sent3) { /* Wait for ACK */ rc = _libssh2_channel_read(session->scpSend_channel, 0, (char *) session->scpSend_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for response"); return NULL; } - else if (rc < 0) { + else if(rc < 0) { _libssh2_error(session, rc, "SCP failure"); goto scp_send_error; } else if(!rc) /* remain in the same state */ goto scp_send_empty_channel; - else if (session->scpSend_response[0] != 0) { + else if(session->scpSend_response[0] != 0) { _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid SCP ACK response"); goto scp_send_error; @@ -981,16 +998,17 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_sent4; } - } else { - if (session->scpSend_state == libssh2_NB_state_sent2) { + } + else { + if(session->scpSend_state == libssh2_NB_state_sent2) { session->scpSend_state = libssh2_NB_state_sent4; } } - if (session->scpSend_state == libssh2_NB_state_sent4) { + if(session->scpSend_state == libssh2_NB_state_sent4) { /* Send mode, size, and basename */ const char *base = strrchr(path, '/'); - if (base) + if(base) base++; else base = path; @@ -1006,15 +1024,16 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_sent5; } - if (session->scpSend_state == libssh2_NB_state_sent5) { + if(session->scpSend_state == libssh2_NB_state_sent5) { rc = _libssh2_channel_write(session->scpSend_channel, 0, session->scpSend_response, session->scpSend_response_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block send core file data for SCP file"); return NULL; - } else if (rc != (int)session->scpSend_response_len) 
{ + } + else if(rc != (int)session->scpSend_response_len) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send core file data for SCP file"); goto scp_send_error; @@ -1023,31 +1042,31 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, session->scpSend_state = libssh2_NB_state_sent6; } - if (session->scpSend_state == libssh2_NB_state_sent6) { + if(session->scpSend_state == libssh2_NB_state_sent6) { /* Wait for ACK */ rc = _libssh2_channel_read(session->scpSend_channel, 0, (char *) session->scpSend_response, 1); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for response"); return NULL; } - else if (rc < 0) { + else if(rc < 0) { _libssh2_error(session, LIBSSH2_ERROR_SCP_PROTOCOL, "Invalid ACK response from remote"); goto scp_send_error; } - else if (rc == 0) + else if(rc == 0) goto scp_send_empty_channel; - else if (session->scpSend_response[0] != 0) { + else if(session->scpSend_response[0] != 0) { size_t err_len; char *err_msg; err_len = _libssh2_channel_packet_data_len(session->scpSend_channel, 0); err_msg = LIBSSH2_ALLOC(session, err_len + 1); - if (!err_msg) { + if(!err_msg) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "failed to get memory"); goto scp_send_error; @@ -1056,8 +1075,8 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, /* Read the remote error message */ rc = _libssh2_channel_read(session->scpSend_channel, 0, err_msg, err_len); - if (rc > 0) { - err_msg[err_len]=0; + if(rc > 0) { + err_msg[err_len] = 0; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "got %02x %s", session->scpSend_response[0], err_msg); @@ -1085,8 +1104,8 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, scp_send_error: tmp_err_code = session->err_code; tmp_err_msg = session->err_msg; - while (libssh2_channel_free(session->scpSend_channel) == - LIBSSH2_ERROR_EAGAIN); + while(libssh2_channel_free(session->scpSend_channel) == + 
LIBSSH2_ERROR_EAGAIN); session->err_code = tmp_err_code; session->err_msg = tmp_err_msg; session->scpSend_channel = NULL; diff --git a/vendor/libssh2/src/session.c b/vendor/libssh2/src/session.c index 6352d12ee7..ae6132c27b 100644 --- a/vendor/libssh2/src/session.c +++ b/vendor/libssh2/src/session.c @@ -99,15 +99,16 @@ banner_receive(LIBSSH2_SESSION * session) int ret; int banner_len; - if (session->banner_TxRx_state == libssh2_NB_state_idle) { + if(session->banner_TxRx_state == libssh2_NB_state_idle) { banner_len = 0; session->banner_TxRx_state = libssh2_NB_state_created; - } else { + } + else { banner_len = session->banner_TxRx_total_send; } - while ((banner_len < (int) sizeof(session->banner_TxRx_banner)) && + while((banner_len < (int) sizeof(session->banner_TxRx_banner)) && ((banner_len == 0) || (session->banner_TxRx_banner[banner_len - 1] != '\n'))) { char c = '\0'; @@ -117,7 +118,7 @@ banner_receive(LIBSSH2_SESSION * session) ret = LIBSSH2_RECV(session, &c, 1, LIBSSH2_SOCKET_RECV_FLAGS(session)); - if (ret < 0) { + if(ret < 0) { if(session->api_block_mode || (ret != -EAGAIN)) /* ignore EAGAIN when non-blocking */ _libssh2_debug(session, LIBSSH2_TRACE_SOCKET, @@ -127,8 +128,8 @@ banner_receive(LIBSSH2_SESSION * session) _libssh2_debug(session, LIBSSH2_TRACE_SOCKET, "Recved %d bytes banner", ret); - if (ret < 0) { - if (ret == -EAGAIN) { + if(ret < 0) { + if(ret == -EAGAIN) { session->socket_block_directions = LIBSSH2_SESSION_BLOCK_INBOUND; session->banner_TxRx_total_send = banner_len; @@ -141,12 +142,12 @@ banner_receive(LIBSSH2_SESSION * session) return LIBSSH2_ERROR_SOCKET_RECV; } - if (ret == 0) { + if(ret == 0) { session->socket_state = LIBSSH2_SOCKET_DISCONNECTED; return LIBSSH2_ERROR_SOCKET_DISCONNECT; } - if (c == '\0') { + if(c == '\0') { /* NULLs are not allowed in SSH banners */ session->banner_TxRx_state = libssh2_NB_state_idle; session->banner_TxRx_total_send = 0; @@ -156,7 +157,7 @@ banner_receive(LIBSSH2_SESSION * session) 
session->banner_TxRx_banner[banner_len++] = c; } - while (banner_len && + while(banner_len && ((session->banner_TxRx_banner[banner_len - 1] == '\n') || (session->banner_TxRx_banner[banner_len - 1] == '\r'))) { banner_len--; @@ -166,11 +167,14 @@ banner_receive(LIBSSH2_SESSION * session) session->banner_TxRx_state = libssh2_NB_state_idle; session->banner_TxRx_total_send = 0; - if (!banner_len) + if(!banner_len) return LIBSSH2_ERROR_BANNER_RECV; + if(session->remote.banner) + LIBSSH2_FREE(session, session->remote.banner); + session->remote.banner = LIBSSH2_ALLOC(session, banner_len + 1); - if (!session->remote.banner) { + if(!session->remote.banner) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Error allocating space for remote banner"); } @@ -201,20 +205,21 @@ banner_send(LIBSSH2_SESSION * session) char banner_dup[256]; #endif - if (session->banner_TxRx_state == libssh2_NB_state_idle) { - if (session->local.banner) { + if(session->banner_TxRx_state == libssh2_NB_state_idle) { + if(session->local.banner) { /* setopt_string will have given us our \r\n characters */ banner_len = strlen((char *) session->local.banner); banner = (char *) session->local.banner; } #ifdef LIBSSH2DEBUG /* Hack and slash to avoid sending CRLF in debug output */ - if (banner_len < 256) { + if(banner_len < 256) { memcpy(banner_dup, banner, banner_len - 2); banner_dup[banner_len - 2] = '\0'; - } else { + } + else { memcpy(banner_dup, banner, 255); - banner[255] = '\0'; + banner_dup[255] = '\0'; } _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Sending Banner: %s", @@ -231,7 +236,7 @@ banner_send(LIBSSH2_SESSION * session) banner + session->banner_TxRx_total_send, banner_len - session->banner_TxRx_total_send, LIBSSH2_SOCKET_SEND_FLAGS(session)); - if (ret < 0) + if(ret < 0) _libssh2_debug(session, LIBSSH2_TRACE_SOCKET, "Error sending %d bytes: %d", banner_len - session->banner_TxRx_total_send, -ret); @@ -241,12 +246,12 @@ banner_send(LIBSSH2_SESSION * session) banner_len - 
session->banner_TxRx_total_send, banner, session->banner_TxRx_total_send); - if (ret != (banner_len - session->banner_TxRx_total_send)) { - if (ret >= 0 || ret == -EAGAIN) { + if(ret != (banner_len - session->banner_TxRx_total_send)) { + if(ret >= 0 || ret == -EAGAIN) { /* the whole packet could not be sent, save the what was */ session->socket_block_directions = LIBSSH2_SESSION_BLOCK_OUTBOUND; - if (ret > 0) + if(ret > 0) session->banner_TxRx_total_send += ret; return LIBSSH2_ERROR_EAGAIN; } @@ -278,7 +283,7 @@ session_nonblock(libssh2_socket_t sockfd, /* operate on this */ int flags; flags = fcntl(sockfd, F_GETFL, 0); - if (nonblock) + if(nonblock) return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK); else return fcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK)); @@ -327,7 +332,7 @@ session_nonblock(libssh2_socket_t sockfd, /* operate on this */ #define SETBLOCK 6 #endif -#if (SETBLOCK == 0) +#if(SETBLOCK == 0) #error "no non-blocking method was found/used/set" #endif } @@ -344,9 +349,9 @@ get_socket_nonblocking(int sockfd) #define GETBLOCK 0 #ifdef HAVE_O_NONBLOCK /* most recent unix versions */ - int flags; + int flags = fcntl(sockfd, F_GETFL, 0); - if ((flags = fcntl(sockfd, F_GETFL, 0)) == -1) { + if(flags == -1) { /* Assume blocking on error */ return 1; } @@ -360,7 +365,7 @@ get_socket_nonblocking(int sockfd) unsigned int option_value; socklen_t option_len = sizeof(option_value); - if (getsockopt + if(getsockopt (sockfd, SOL_SOCKET, SO_ERROR, (void *) &option_value, &option_len)) { /* Assume blocking on error */ return 1; @@ -373,7 +378,7 @@ get_socket_nonblocking(int sockfd) #if defined(HAVE_SO_NONBLOCK) && (GETBLOCK == 0) /* BeOS */ long b; - if (getsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b))) { + if(getsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b))) { /* Assume blocking on error */ return 1; } @@ -382,19 +387,19 @@ get_socket_nonblocking(int sockfd) #define GETBLOCK 5 #endif -#if defined(SO_STATE) && defined( __VMS ) && (GETBLOCK == 0) +#if 
defined(SO_STATE) && defined(__VMS) && (GETBLOCK == 0) /* VMS TCP/IP Services */ size_t sockstat = 0; int callstat = 0; - size_t size = sizeof( int ); + size_t size = sizeof(int); callstat = getsockopt(sockfd, SOL_SOCKET, SO_STATE, (char *)&sockstat, &size); - if ( callstat == -1 ) return(0); - if ( (sockstat&SS_NBIO) )return(1); - return(0); + if(callstat == -1) return 0; + if((sockstat&SS_NBIO) != 0) return 1; + return 0; #undef GETBLOCK #define GETBLOCK 6 @@ -406,7 +411,7 @@ get_socket_nonblocking(int sockfd) #define GETBLOCK 7 #endif -#if (GETBLOCK == 0) +#if(GETBLOCK == 0) #error "no non-blocking method was found/used/get" #endif } @@ -419,16 +424,16 @@ libssh2_session_banner_set(LIBSSH2_SESSION * session, const char *banner) { size_t banner_len = banner ? strlen(banner) : 0; - if (session->local.banner) { + if(session->local.banner) { LIBSSH2_FREE(session, session->local.banner); session->local.banner = NULL; } - if (!banner_len) + if(!banner_len) return 0; session->local.banner = LIBSSH2_ALLOC(session, banner_len + 3); - if (!session->local.banner) { + if(!session->local.banner) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for local banner"); } @@ -474,18 +479,18 @@ libssh2_session_init_ex(LIBSSH2_ALLOC_FUNC((*my_alloc)), LIBSSH2_REALLOC_FUNC((*local_realloc)) = libssh2_default_realloc; LIBSSH2_SESSION *session; - if (my_alloc) { + if(my_alloc) { local_alloc = my_alloc; } - if (my_free) { + if(my_free) { local_free = my_free; } - if (my_realloc) { + if(my_realloc) { local_realloc = my_realloc; } session = local_alloc(sizeof(LIBSSH2_SESSION), &abstract); - if (session) { + if(session) { memset(session, 0, sizeof(LIBSSH2_SESSION)); session->alloc = local_alloc; session->free = local_free; @@ -497,7 +502,7 @@ libssh2_session_init_ex(LIBSSH2_ALLOC_FUNC((*my_alloc)), session->api_block_mode = 1; /* blocking API by default */ _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "New session resource allocated"); - 
_libssh2_init_if_needed (); + _libssh2_init_if_needed(); } return session; } @@ -508,16 +513,18 @@ libssh2_session_init_ex(LIBSSH2_ALLOC_FUNC((*my_alloc)), * Set (or reset) a callback function * Returns the prior address * - * FIXME: this function relies on that we can typecast function pointers + * ALERT: this function relies on that we can typecast function pointers * to void pointers, which isn't allowed in ISO C! */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" LIBSSH2_API void * libssh2_session_callback_set(LIBSSH2_SESSION * session, int cbtype, void *callback) { void *oldcb; - switch (cbtype) { + switch(cbtype) { case LIBSSH2_CALLBACK_IGNORE: oldcb = session->ssh_msg_ignore; session->ssh_msg_ignore = callback; @@ -553,10 +560,12 @@ libssh2_session_callback_set(LIBSSH2_SESSION * session, session->recv = callback; return oldcb; } - _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Setting Callback %d", cbtype); + _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Setting Callback %d", + cbtype); return NULL; } +#pragma GCC diagnostic pop /* * _libssh2_wait_socket() @@ -579,8 +588,8 @@ int _libssh2_wait_socket(LIBSSH2_SESSION *session, time_t start_time) being stored as error when a blocking function has returned */ session->err_code = LIBSSH2_ERROR_NONE; - rc = libssh2_keepalive_send (session, &seconds_to_next); - if (rc < 0) + rc = libssh2_keepalive_send(session, &seconds_to_next); + if(rc) return rc; ms_to_next = seconds_to_next * 1000; @@ -597,19 +606,19 @@ int _libssh2_wait_socket(LIBSSH2_SESSION *session, time_t start_time) ms_to_next = 1000; } - if (session->api_timeout > 0 && + if(session->api_timeout > 0 && (seconds_to_next == 0 || ms_to_next > session->api_timeout)) { - time_t now = time (NULL); + time_t now = time(NULL); elapsed_ms = (long)(1000*difftime(now, start_time)); - if (elapsed_ms > session->api_timeout) { + if(elapsed_ms > session->api_timeout) { return _libssh2_error(session, LIBSSH2_ERROR_TIMEOUT, "API timeout expired"); 
} ms_to_next = (session->api_timeout - elapsed_ms); has_timeout = 1; } - else if (ms_to_next > 0) { + else if(ms_to_next > 0) { has_timeout = 1; } else @@ -675,10 +684,10 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) { int rc; - if (session->startup_state == libssh2_NB_state_idle) { + if(session->startup_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "session_startup for socket %d", sock); - if (LIBSSH2_INVALID_SOCKET == sock) { + if(LIBSSH2_INVALID_SOCKET == sock) { /* Did we forget something? */ return _libssh2_error(session, LIBSSH2_ERROR_BAD_SOCKET, "Bad socket provided"); @@ -688,10 +697,10 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->socket_prev_blockstate = !get_socket_nonblocking(session->socket_fd); - if (session->socket_prev_blockstate) { + if(session->socket_prev_blockstate) { /* If in blocking state change to non-blocking */ rc = session_nonblock(session->socket_fd, 1); - if (rc) { + if(rc) { return _libssh2_error(session, rc, "Failed changing socket's " "blocking state to non-blocking"); @@ -701,9 +710,11 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->startup_state = libssh2_NB_state_created; } - if (session->startup_state == libssh2_NB_state_created) { + if(session->startup_state == libssh2_NB_state_created) { rc = banner_send(session); - if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) + return rc; + else if(rc) { return _libssh2_error(session, rc, "Failed sending banner"); } @@ -711,10 +722,12 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->banner_TxRx_state = libssh2_NB_state_idle; } - if (session->startup_state == libssh2_NB_state_sent) { + if(session->startup_state == libssh2_NB_state_sent) { do { rc = banner_receive(session); - if (rc) + if(rc == LIBSSH2_ERROR_EAGAIN) + return rc; + else if(rc) return _libssh2_error(session, rc, "Failed getting banner"); } while(strncmp("SSH-", (char *)session->remote.banner, 
4)); @@ -722,16 +735,18 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->startup_state = libssh2_NB_state_sent1; } - if (session->startup_state == libssh2_NB_state_sent1) { + if(session->startup_state == libssh2_NB_state_sent1) { rc = _libssh2_kex_exchange(session, 0, &session->startup_key_state); - if (rc) + if(rc == LIBSSH2_ERROR_EAGAIN) + return rc; + else if(rc) return _libssh2_error(session, rc, "Unable to exchange encryption keys"); session->startup_state = libssh2_NB_state_sent2; } - if (session->startup_state == libssh2_NB_state_sent2) { + if(session->startup_state == libssh2_NB_state_sent2) { _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Requesting userauth service"); @@ -745,11 +760,13 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->startup_state = libssh2_NB_state_sent3; } - if (session->startup_state == libssh2_NB_state_sent3) { + if(session->startup_state == libssh2_NB_state_sent3) { rc = _libssh2_transport_send(session, session->startup_service, sizeof("ssh-userauth") + 5 - 1, NULL, 0); - if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) + return rc; + else if(rc) { return _libssh2_error(session, rc, "Unable to ask for ssh-userauth service"); } @@ -757,18 +774,24 @@ session_startup(LIBSSH2_SESSION *session, libssh2_socket_t sock) session->startup_state = libssh2_NB_state_sent4; } - if (session->startup_state == libssh2_NB_state_sent4) { + if(session->startup_state == libssh2_NB_state_sent4) { rc = _libssh2_packet_require(session, SSH_MSG_SERVICE_ACCEPT, &session->startup_data, &session->startup_data_len, 0, NULL, 0, &session->startup_req_state); - if (rc) + if(rc) return rc; + if(session->startup_data_len < 5) { + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet length"); + } + session->startup_service_length = _libssh2_ntohu32(session->startup_data + 1); - if ((session->startup_service_length != (sizeof("ssh-userauth") - 1)) + + if((session->startup_service_length != 
(sizeof("ssh-userauth") - 1)) || strncmp("ssh-userauth", (char *) session->startup_data + 5, session->startup_service_length)) { LIBSSH2_FREE(session, session->startup_data); @@ -838,203 +861,213 @@ session_free(LIBSSH2_SESSION *session) LIBSSH2_LISTENER *l; int packets_left = 0; - if (session->free_state == libssh2_NB_state_idle) { - _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Freeing session resource", + if(session->free_state == libssh2_NB_state_idle) { + _libssh2_debug(session, LIBSSH2_TRACE_TRANS, + "Freeing session resource", session->remote.banner); session->free_state = libssh2_NB_state_created; } - if (session->free_state == libssh2_NB_state_created) { - while ((ch = _libssh2_list_first(&session->channels))) { + if(session->free_state == libssh2_NB_state_created) { + while((ch = _libssh2_list_first(&session->channels))) { rc = _libssh2_channel_free(ch); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } session->free_state = libssh2_NB_state_sent; } - if (session->free_state == libssh2_NB_state_sent) { - while ((l = _libssh2_list_first(&session->listeners))) { + if(session->free_state == libssh2_NB_state_sent) { + while((l = _libssh2_list_first(&session->listeners))) { rc = _libssh2_channel_forward_cancel(l); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; } session->free_state = libssh2_NB_state_sent1; } - if (session->state & LIBSSH2_STATE_NEWKEYS) { + if(session->state & LIBSSH2_STATE_NEWKEYS) { /* hostkey */ - if (session->hostkey && session->hostkey->dtor) { + if(session->hostkey && session->hostkey->dtor) { session->hostkey->dtor(session, &session->server_hostkey_abstract); } /* Client to Server */ /* crypt */ - if (session->local.crypt && session->local.crypt->dtor) { + if(session->local.crypt && session->local.crypt->dtor) { session->local.crypt->dtor(session, &session->local.crypt_abstract); } /* comp */ - if (session->local.comp && session->local.comp->dtor) { + if(session->local.comp 
&& session->local.comp->dtor) { session->local.comp->dtor(session, 1, &session->local.comp_abstract); } /* mac */ - if (session->local.mac && session->local.mac->dtor) { + if(session->local.mac && session->local.mac->dtor) { session->local.mac->dtor(session, &session->local.mac_abstract); } /* Server to Client */ /* crypt */ - if (session->remote.crypt && session->remote.crypt->dtor) { + if(session->remote.crypt && session->remote.crypt->dtor) { session->remote.crypt->dtor(session, &session->remote.crypt_abstract); } /* comp */ - if (session->remote.comp && session->remote.comp->dtor) { + if(session->remote.comp && session->remote.comp->dtor) { session->remote.comp->dtor(session, 0, &session->remote.comp_abstract); } /* mac */ - if (session->remote.mac && session->remote.mac->dtor) { + if(session->remote.mac && session->remote.mac->dtor) { session->remote.mac->dtor(session, &session->remote.mac_abstract); } /* session_id */ - if (session->session_id) { + if(session->session_id) { LIBSSH2_FREE(session, session->session_id); } } /* Free banner(s) */ - if (session->remote.banner) { + if(session->remote.banner) { LIBSSH2_FREE(session, session->remote.banner); } - if (session->local.banner) { + if(session->local.banner) { LIBSSH2_FREE(session, session->local.banner); } /* Free preference(s) */ - if (session->kex_prefs) { + if(session->kex_prefs) { LIBSSH2_FREE(session, session->kex_prefs); } - if (session->hostkey_prefs) { + if(session->hostkey_prefs) { LIBSSH2_FREE(session, session->hostkey_prefs); } - if (session->local.kexinit) { + if(session->local.kexinit) { LIBSSH2_FREE(session, session->local.kexinit); } - if (session->local.crypt_prefs) { + if(session->local.crypt_prefs) { LIBSSH2_FREE(session, session->local.crypt_prefs); } - if (session->local.mac_prefs) { + if(session->local.mac_prefs) { LIBSSH2_FREE(session, session->local.mac_prefs); } - if (session->local.comp_prefs) { + if(session->local.comp_prefs) { LIBSSH2_FREE(session, session->local.comp_prefs); } - 
if (session->local.lang_prefs) { + if(session->local.lang_prefs) { LIBSSH2_FREE(session, session->local.lang_prefs); } - if (session->remote.kexinit) { + if(session->remote.kexinit) { LIBSSH2_FREE(session, session->remote.kexinit); } - if (session->remote.crypt_prefs) { + if(session->remote.crypt_prefs) { LIBSSH2_FREE(session, session->remote.crypt_prefs); } - if (session->remote.mac_prefs) { + if(session->remote.mac_prefs) { LIBSSH2_FREE(session, session->remote.mac_prefs); } - if (session->remote.comp_prefs) { + if(session->remote.comp_prefs) { LIBSSH2_FREE(session, session->remote.comp_prefs); } - if (session->remote.lang_prefs) { + if(session->remote.lang_prefs) { LIBSSH2_FREE(session, session->remote.lang_prefs); } + if(session->server_sign_algorithms) { + LIBSSH2_FREE(session, session->server_sign_algorithms); + } + if(session->sign_algo_prefs) { + LIBSSH2_FREE(session, session->sign_algo_prefs); + } /* * Make sure all memory used in the state variables are free */ - if (session->kexinit_data) { + if(session->kexinit_data) { LIBSSH2_FREE(session, session->kexinit_data); } - if (session->startup_data) { + if(session->startup_data) { LIBSSH2_FREE(session, session->startup_data); } - if (session->userauth_list_data) { + if(session->userauth_list_data) { LIBSSH2_FREE(session, session->userauth_list_data); } - if (session->userauth_pswd_data) { + if(session->userauth_banner) { + LIBSSH2_FREE(session, session->userauth_banner); + } + if(session->userauth_pswd_data) { LIBSSH2_FREE(session, session->userauth_pswd_data); } - if (session->userauth_pswd_newpw) { + if(session->userauth_pswd_newpw) { LIBSSH2_FREE(session, session->userauth_pswd_newpw); } - if (session->userauth_host_packet) { + if(session->userauth_host_packet) { LIBSSH2_FREE(session, session->userauth_host_packet); } - if (session->userauth_host_method) { + if(session->userauth_host_method) { LIBSSH2_FREE(session, session->userauth_host_method); } - if (session->userauth_host_data) { + 
if(session->userauth_host_data) { LIBSSH2_FREE(session, session->userauth_host_data); } - if (session->userauth_pblc_data) { + if(session->userauth_pblc_data) { LIBSSH2_FREE(session, session->userauth_pblc_data); } - if (session->userauth_pblc_packet) { + if(session->userauth_pblc_packet) { LIBSSH2_FREE(session, session->userauth_pblc_packet); } - if (session->userauth_pblc_method) { + if(session->userauth_pblc_method) { LIBSSH2_FREE(session, session->userauth_pblc_method); } - if (session->userauth_kybd_data) { + if(session->userauth_kybd_data) { LIBSSH2_FREE(session, session->userauth_kybd_data); } - if (session->userauth_kybd_packet) { + if(session->userauth_kybd_packet) { LIBSSH2_FREE(session, session->userauth_kybd_packet); } - if (session->userauth_kybd_auth_instruction) { + if(session->userauth_kybd_auth_instruction) { LIBSSH2_FREE(session, session->userauth_kybd_auth_instruction); } - if (session->open_packet) { + if(session->open_packet) { LIBSSH2_FREE(session, session->open_packet); } - if (session->open_data) { + if(session->open_data) { LIBSSH2_FREE(session, session->open_data); } - if (session->direct_message) { + if(session->direct_message) { LIBSSH2_FREE(session, session->direct_message); } - if (session->fwdLstn_packet) { + if(session->fwdLstn_packet) { LIBSSH2_FREE(session, session->fwdLstn_packet); } - if (session->pkeyInit_data) { + if(session->pkeyInit_data) { LIBSSH2_FREE(session, session->pkeyInit_data); } - if (session->scpRecv_command) { + if(session->scpRecv_command) { LIBSSH2_FREE(session, session->scpRecv_command); } - if (session->scpSend_command) { + if(session->scpSend_command) { LIBSSH2_FREE(session, session->scpSend_command); } - if (session->sftpInit_sftp) { + if(session->sftpInit_sftp) { LIBSSH2_FREE(session, session->sftpInit_sftp); } /* Free payload buffer */ - if (session->packet.total_num) { + if(session->packet.total_num) { LIBSSH2_FREE(session, session->packet.payload); } /* Cleanup all remaining packets */ - while ((pkg = 
_libssh2_list_first(&session->packets))) { + while((pkg = _libssh2_list_first(&session->packets))) { packets_left++; _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "packet left with id %d", pkg->data[0]); @@ -1051,18 +1084,19 @@ session_free(LIBSSH2_SESSION *session) if(session->socket_prev_blockstate) { /* if the socket was previously blocking, put it back so */ rc = session_nonblock(session->socket_fd, 0); - if (rc) { + if(rc) { _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "unable to reset socket's blocking state"); } } - if (session->server_hostkey) { + if(session->server_hostkey) { LIBSSH2_FREE(session, session->server_hostkey); } /* error string */ - if (session->err_msg && ((session->err_flags & LIBSSH2_ERR_FLAG_DUP) != 0)) { + if(session->err_msg && + ((session->err_flags & LIBSSH2_ERR_FLAG_DUP) != 0)) { LIBSSH2_FREE(session, (char *)session->err_msg); } @@ -1099,14 +1133,14 @@ session_disconnect(LIBSSH2_SESSION *session, int reason, unsigned long descr_len = 0, lang_len = 0; int rc; - if (session->disconnect_state == libssh2_NB_state_idle) { + if(session->disconnect_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Disconnecting: reason=%d, desc=%s, lang=%s", reason, description, lang); - if (description) + if(description) descr_len = strlen(description); - if (lang) + if(lang) lang_len = strlen(lang); if(descr_len > 256) @@ -1130,7 +1164,7 @@ session_disconnect(LIBSSH2_SESSION *session, int reason, rc = _libssh2_transport_send(session, session->disconnect_data, session->disconnect_data_len, (unsigned char *)lang, lang_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; session->disconnect_state = libssh2_NB_state_idle; @@ -1146,7 +1180,7 @@ libssh2_session_disconnect_ex(LIBSSH2_SESSION *session, int reason, const char *desc, const char *lang) { int rc; - + session->state &= ~LIBSSH2_STATE_EXCHANGING_KEYS; BLOCK_ADJUST(rc, session, session_disconnect(session, reason, desc, lang)); @@ -1166,7 
+1200,7 @@ libssh2_session_methods(LIBSSH2_SESSION * session, int method_type) /* All methods have char *name as their first element */ const LIBSSH2_KEX_METHOD *method = NULL; - switch (method_type) { + switch(method_type) { case LIBSSH2_METHOD_KEX: method = session->kex; break; @@ -1211,7 +1245,7 @@ libssh2_session_methods(LIBSSH2_SESSION * session, int method_type) return NULL; } - if (!method) { + if(!method) { _libssh2_error(session, LIBSSH2_ERROR_METHOD_NONE, "No method negotiated"); return NULL; @@ -1242,32 +1276,33 @@ libssh2_session_last_error(LIBSSH2_SESSION * session, char **errmsg, size_t msglen = 0; /* No error to report */ - if (!session->err_code) { - if (errmsg) { - if (want_buf) { + if(!session->err_code) { + if(errmsg) { + if(want_buf) { *errmsg = LIBSSH2_ALLOC(session, 1); - if (*errmsg) { + if(*errmsg) { **errmsg = 0; } - } else { + } + else { *errmsg = (char *) ""; } } - if (errmsg_len) { + if(errmsg_len) { *errmsg_len = 0; } return 0; } - if (errmsg) { + if(errmsg) { const char *error = session->err_msg ? 
session->err_msg : ""; msglen = strlen(error); - if (want_buf) { + if(want_buf) { /* Make a copy so the calling program can own it */ *errmsg = LIBSSH2_ALLOC(session, msglen + 1); - if (*errmsg) { + if(*errmsg) { memcpy(*errmsg, error, msglen); (*errmsg)[msglen] = 0; } @@ -1276,7 +1311,7 @@ libssh2_session_last_error(LIBSSH2_SESSION * session, char **errmsg, *errmsg = (char *)error; } - if (errmsg_len) { + if(errmsg_len) { *errmsg_len = msglen; } @@ -1304,7 +1339,7 @@ libssh2_session_last_errno(LIBSSH2_SESSION * session) LIBSSH2_API int libssh2_session_set_last_error(LIBSSH2_SESSION* session, int errcode, - const char* errmsg) + const char *errmsg) { return _libssh2_error_flags(session, errcode, errmsg, LIBSSH2_ERR_FLAG_DUP); @@ -1412,14 +1447,20 @@ libssh2_poll_channel_read(LIBSSH2_CHANNEL *channel, int extended) session = channel->session; packet = _libssh2_list_first(&session->packets); - while (packet) { - if ( channel->local.id == _libssh2_ntohu32(packet->data + 1)) { - if ( extended == 1 && - (packet->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA - || packet->data[0] == SSH_MSG_CHANNEL_DATA )) { + while(packet) { + if(packet->data_len < 5) { + return _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Packet too small"); + } + + if(channel->local.id == _libssh2_ntohu32(packet->data + 1)) { + if(extended == 1 && + (packet->data[0] == SSH_MSG_CHANNEL_EXTENDED_DATA + || packet->data[0] == SSH_MSG_CHANNEL_DATA)) { return 1; - } else if ( extended == 0 && - packet->data[0] == SSH_MSG_CHANNEL_DATA) { + } + else if(extended == 0 && + packet->data[0] == SSH_MSG_CHANNEL_DATA) { return 1; } /* else - no data of any type is ready to be read */ @@ -1470,7 +1511,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) #else struct pollfd sockets[256]; - if (nfds > 256) + if(nfds > 256) /* systems without alloca use a fixed-size array, this can be fixed if we really want to, at least if the compiler is a C99 capable one */ return -1; @@ -1478,7 +1519,7 @@ 
libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) /* Setup sockets for polling */ for(i = 0; i < nfds; i++) { fds[i].revents = 0; - switch (fds[i].type) { + switch(fds[i].type) { case LIBSSH2_POLLFD_SOCKET: sockets[i].fd = fds[i].fd.socket; sockets[i].events = fds[i].events; @@ -1489,7 +1530,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) sockets[i].fd = fds[i].fd.channel->session->socket_fd; sockets[i].events = POLLIN; sockets[i].revents = 0; - if (!session) + if(!session) session = fds[i].fd.channel->session; break; @@ -1497,12 +1538,12 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) sockets[i].fd = fds[i].fd.listener->session->socket_fd; sockets[i].events = POLLIN; sockets[i].revents = 0; - if (!session) + if(!session) session = fds[i].fd.listener->session; break; default: - if (session) + if(session) _libssh2_error(session, LIBSSH2_ERROR_INVALID_POLL_TYPE, "Invalid descriptor passed to libssh2_poll()"); return -1; @@ -1518,38 +1559,38 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) FD_ZERO(&wfds); for(i = 0; i < nfds; i++) { fds[i].revents = 0; - switch (fds[i].type) { + switch(fds[i].type) { case LIBSSH2_POLLFD_SOCKET: - if (fds[i].events & LIBSSH2_POLLFD_POLLIN) { + if(fds[i].events & LIBSSH2_POLLFD_POLLIN) { FD_SET(fds[i].fd.socket, &rfds); - if (fds[i].fd.socket > maxfd) + if(fds[i].fd.socket > maxfd) maxfd = fds[i].fd.socket; } - if (fds[i].events & LIBSSH2_POLLFD_POLLOUT) { + if(fds[i].events & LIBSSH2_POLLFD_POLLOUT) { FD_SET(fds[i].fd.socket, &wfds); - if (fds[i].fd.socket > maxfd) + if(fds[i].fd.socket > maxfd) maxfd = fds[i].fd.socket; } break; case LIBSSH2_POLLFD_CHANNEL: FD_SET(fds[i].fd.channel->session->socket_fd, &rfds); - if (fds[i].fd.channel->session->socket_fd > maxfd) + if(fds[i].fd.channel->session->socket_fd > maxfd) maxfd = fds[i].fd.channel->session->socket_fd; - if (!session) + if(!session) session = fds[i].fd.channel->session; break; case 
LIBSSH2_POLLFD_LISTENER: FD_SET(fds[i].fd.listener->session->socket_fd, &rfds); - if (fds[i].fd.listener->session->socket_fd > maxfd) + if(fds[i].fd.listener->session->socket_fd > maxfd) maxfd = fds[i].fd.listener->session->socket_fd; - if (!session) + if(!session) session = fds[i].fd.listener->session; break; default: - if (session) + if(session) _libssh2_error(session, LIBSSH2_ERROR_INVALID_POLL_TYPE, "Invalid descriptor passed to libssh2_poll()"); return -1; @@ -1572,10 +1613,10 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) active_fds = 0; for(i = 0; i < nfds; i++) { - if (fds[i].events != fds[i].revents) { - switch (fds[i].type) { + if(fds[i].events != fds[i].revents) { + switch(fds[i].type) { case LIBSSH2_POLLFD_CHANNEL: - if ((fds[i].events & LIBSSH2_POLLFD_POLLIN) && + if((fds[i].events & LIBSSH2_POLLFD_POLLIN) && /* Want to be ready for read */ ((fds[i].revents & LIBSSH2_POLLFD_POLLIN) == 0)) { /* Not yet known to be ready for read */ @@ -1584,7 +1625,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) 0) ? LIBSSH2_POLLFD_POLLIN : 0; } - if ((fds[i].events & LIBSSH2_POLLFD_POLLEXT) && + if((fds[i].events & LIBSSH2_POLLFD_POLLEXT) && /* Want to be ready for extended read */ ((fds[i].revents & LIBSSH2_POLLFD_POLLEXT) == 0)) { /* Not yet known to be ready for extended read */ @@ -1593,7 +1634,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) 1) ? LIBSSH2_POLLFD_POLLEXT : 0; } - if ((fds[i].events & LIBSSH2_POLLFD_POLLOUT) && + if((fds[i].events & LIBSSH2_POLLFD_POLLOUT) && /* Want to be ready for write */ ((fds[i].revents & LIBSSH2_POLLFD_POLLOUT) == 0)) { /* Not yet known to be ready for write */ @@ -1601,11 +1642,11 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) poll_channel_write(fds[i].fd. channel) ? 
LIBSSH2_POLLFD_POLLOUT : 0; } - if (fds[i].fd.channel->remote.close + if(fds[i].fd.channel->remote.close || fds[i].fd.channel->local.close) { fds[i].revents |= LIBSSH2_POLLFD_CHANNEL_CLOSED; } - if (fds[i].fd.channel->session->socket_state == + if(fds[i].fd.channel->session->socket_state == LIBSSH2_SOCKET_DISCONNECTED) { fds[i].revents |= LIBSSH2_POLLFD_CHANNEL_CLOSED | @@ -1614,7 +1655,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) break; case LIBSSH2_POLLFD_LISTENER: - if ((fds[i].events & LIBSSH2_POLLFD_POLLIN) && + if((fds[i].events & LIBSSH2_POLLFD_POLLIN) && /* Want a connection */ ((fds[i].revents & LIBSSH2_POLLFD_POLLIN) == 0)) { /* No connections known of yet */ @@ -1622,7 +1663,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) poll_listener_queued(fds[i].fd. listener) ? LIBSSH2_POLLFD_POLLIN : 0; } - if (fds[i].fd.listener->session->socket_state == + if(fds[i].fd.listener->session->socket_state == LIBSSH2_SOCKET_DISCONNECTED) { fds[i].revents |= LIBSSH2_POLLFD_LISTENER_CLOSED | @@ -1631,12 +1672,12 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) break; } } - if (fds[i].revents) { + if(fds[i].revents) { active_fds++; } } - if (active_fds) { + if(active_fds) { /* Don't block on the sockets if we have channels/listeners which are ready */ timeout_remaining = 0; @@ -1661,23 +1702,25 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) timeout_remaining = 0; #endif /* HAVE_GETTIMEOFDAY */ - if (sysret > 0) { + if(sysret > 0) { for(i = 0; i < nfds; i++) { - switch (fds[i].type) { + switch(fds[i].type) { case LIBSSH2_POLLFD_SOCKET: fds[i].revents = sockets[i].revents; - sockets[i].revents = 0; /* In case we loop again, be nice */ - if (fds[i].revents) { + sockets[i].revents = 0; /* In case we loop again, be + nice */ + if(fds[i].revents) { active_fds++; } break; case LIBSSH2_POLLFD_CHANNEL: - if (sockets[i].events & POLLIN) { + if(sockets[i].events & POLLIN) { /* Spin 
session until no data available */ - while (_libssh2_transport_read(fds[i].fd.channel->session) - > 0); + while(_libssh2_transport_read(fds[i].fd. + channel->session) + > 0); } - if (sockets[i].revents & POLLHUP) { + if(sockets[i].revents & POLLHUP) { fds[i].revents |= LIBSSH2_POLLFD_CHANNEL_CLOSED | LIBSSH2_POLLFD_SESSION_CLOSED; @@ -1685,12 +1728,13 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) sockets[i].revents = 0; break; case LIBSSH2_POLLFD_LISTENER: - if (sockets[i].events & POLLIN) { + if(sockets[i].events & POLLIN) { /* Spin session until no data available */ - while (_libssh2_transport_read(fds[i].fd.listener->session) - > 0); + while(_libssh2_transport_read(fds[i].fd. + listener->session) + > 0); } - if (sockets[i].revents & POLLHUP) { + if(sockets[i].revents & POLLHUP) { fds[i].revents |= LIBSSH2_POLLFD_LISTENER_CLOSED | LIBSSH2_POLLFD_SESSION_CLOSED; @@ -1708,7 +1752,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) struct timeval tv_begin, tv_end; _libssh2_gettimeofday((struct timeval *) &tv_begin, NULL); - sysret = select(maxfd+1, &rfds, &wfds, NULL, &tv); + sysret = select(maxfd + 1, &rfds, &wfds, NULL, &tv); _libssh2_gettimeofday((struct timeval *) &tv_end, NULL); timeout_remaining -= (tv_end.tv_sec - tv_begin.tv_sec) * 1000; @@ -1718,39 +1762,42 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) /* If the platform doesn't support gettimeofday, * then just make the call non-blocking and walk away */ - sysret = select(maxfd+1, &rfds, &wfds, NULL, &tv); + sysret = select(maxfd + 1, &rfds, &wfds, NULL, &tv); timeout_remaining = 0; #endif - if (sysret > 0) { + if(sysret > 0) { for(i = 0; i < nfds; i++) { - switch (fds[i].type) { + switch(fds[i].type) { case LIBSSH2_POLLFD_SOCKET: - if (FD_ISSET(fds[i].fd.socket, &rfds)) { + if(FD_ISSET(fds[i].fd.socket, &rfds)) { fds[i].revents |= LIBSSH2_POLLFD_POLLIN; } - if (FD_ISSET(fds[i].fd.socket, &wfds)) { + if(FD_ISSET(fds[i].fd.socket, 
&wfds)) { fds[i].revents |= LIBSSH2_POLLFD_POLLOUT; } - if (fds[i].revents) { + if(fds[i].revents) { active_fds++; } break; case LIBSSH2_POLLFD_CHANNEL: - if (FD_ISSET(fds[i].fd.channel->session->socket_fd, &rfds)) { + if(FD_ISSET(fds[i].fd.channel->session->socket_fd, + &rfds)) { /* Spin session until no data available */ - while (_libssh2_transport_read(fds[i].fd.channel->session) - > 0); + while(_libssh2_transport_read(fds[i].fd. + channel->session) + > 0); } break; case LIBSSH2_POLLFD_LISTENER: - if (FD_ISSET + if(FD_ISSET (fds[i].fd.listener->session->socket_fd, &rfds)) { /* Spin session until no data available */ - while (_libssh2_transport_read(fds[i].fd.listener->session) - > 0); + while(_libssh2_transport_read(fds[i].fd. + listener->session) + > 0); } break; } @@ -1758,7 +1805,7 @@ libssh2_poll(LIBSSH2_POLLFD * fds, unsigned int nfds, long timeout) } #endif /* else no select() or poll() -- timeout (and by extension * timeout_remaining) will be equal to 0 */ - } while ((timeout_remaining > 0) && !active_fds); + } while((timeout_remaining > 0) && !active_fds); return active_fds; } @@ -1784,10 +1831,10 @@ LIBSSH2_API const char * libssh2_session_banner_get(LIBSSH2_SESSION *session) { /* to avoid a coredump when session is NULL */ - if (NULL == session) + if(NULL == session) return NULL; - if (NULL==session->remote.banner) + if(NULL == session->remote.banner) return NULL; return (const char *) session->remote.banner; diff --git a/vendor/libssh2/src/session.h b/vendor/libssh2/src/session.h index aff4f2c5c8..9f8f2c7060 100644 --- a/vendor/libssh2/src/session.h +++ b/vendor/libssh2/src/session.h @@ -1,5 +1,5 @@ -#ifndef LIBSSH2_SESSION_H -#define LIBSSH2_SESSION_H +#ifndef __LIBSSH2_SESSION_H +#define __LIBSSH2_SESSION_H /* Copyright (c) 2004-2007 Sara Golemon * Copyright (c) 2009-2010 by Daniel Stenberg * Copyright (c) 2010 Simon Josefsson @@ -51,9 +51,9 @@ function. 
*/ -#define BLOCK_ADJUST(rc,sess,x) \ +#define BLOCK_ADJUST(rc, sess, x) \ do { \ - time_t entry_time = time (NULL); \ + time_t entry_time = time(NULL); \ do { \ rc = x; \ /* the order of the check below is important to properly deal with \ @@ -70,9 +70,9 @@ * immediately. If the API is blocking and we get a NULL we check the errno * and *only* if that is EAGAIN we loop and wait for socket action. */ -#define BLOCK_ADJUST_ERRNO(ptr,sess,x) \ +#define BLOCK_ADJUST_ERRNO(ptr, sess, x) \ do { \ - time_t entry_time = time (NULL); \ + time_t entry_time = time(NULL); \ int rc; \ do { \ ptr = x; \ @@ -90,4 +90,4 @@ int _libssh2_wait_socket(LIBSSH2_SESSION *session, time_t entry_time); /* this is the lib-internal set blocking function */ int _libssh2_session_set_blocking(LIBSSH2_SESSION * session, int blocking); -#endif /* LIBSSH2_SESSION_H */ +#endif /* __LIBSSH2_SESSION_H */ diff --git a/vendor/libssh2/src/sftp.c b/vendor/libssh2/src/sftp.c index 7c44116401..ac7ee01621 100644 --- a/vendor/libssh2/src/sftp.c +++ b/vendor/libssh2/src/sftp.c @@ -1,6 +1,6 @@ /* Copyright (c) 2004-2008, Sara Golemon * Copyright (c) 2007 Eli Fant - * Copyright (c) 2009-2014 by Daniel Stenberg + * Copyright (c) 2009-2019 by Daniel Stenberg * All rights reserved. * * Redistribution and use in source and binary forms, @@ -91,7 +91,7 @@ /* This is the maximum packet length to accept, as larger than this indicate some kind of server problem. 
*/ -#define LIBSSH2_SFTP_PACKET_MAXLEN 80000 +#define LIBSSH2_SFTP_PACKET_MAXLEN (256 * 1024) static int sftp_packet_ask(LIBSSH2_SFTP *sftp, unsigned char packet_type, uint32_t request_id, unsigned char **data, @@ -161,7 +161,8 @@ remove_zombie_request(LIBSSH2_SFTP *sftp, uint32_t request_id) request_id); if(zombie) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, - "Removing request ID %ld from the list of zombie requests", + "Removing request ID %ld from the list of " + "zombie requests", request_id); _libssh2_list_remove(&zombie->node); @@ -181,7 +182,7 @@ add_zombie_request(LIBSSH2_SFTP *sftp, uint32_t request_id) zombie = LIBSSH2_ALLOC(sftp->channel->session, sizeof(struct sftp_zombie_requests)); - if (!zombie) + if(!zombie) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "malloc fail for zombie request ID"); else { @@ -204,6 +205,10 @@ sftp_packet_add(LIBSSH2_SFTP *sftp, unsigned char *data, LIBSSH2_SFTP_PACKET *packet; uint32_t request_id; + if(data_len < 5) { + return LIBSSH2_ERROR_OUT_OF_BOUNDARY; + } + _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Received packet type %d (len %d)", (int) data[0], data_len); @@ -268,7 +273,7 @@ sftp_packet_add(LIBSSH2_SFTP *sftp, unsigned char *data, } packet = LIBSSH2_ALLOC(session, sizeof(LIBSSH2_SFTP_PACKET)); - if (!packet) { + if(!packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate datablock for SFTP packet"); } @@ -327,9 +332,9 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) (char *)&sftp->partial_size[ sftp->partial_size_len], 4 - sftp->partial_size_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - else if (rc < 0) + else if(rc < 0) return _libssh2_error(session, rc, "channel read"); sftp->partial_size_len += rc; @@ -341,16 +346,24 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) sftp->partial_len = _libssh2_ntohu32(sftp->partial_size); /* make sure we don't proceed if the packet size is unreasonably large */ - if (sftp->partial_len > LIBSSH2_SFTP_PACKET_MAXLEN) + 
if(sftp->partial_len > LIBSSH2_SFTP_PACKET_MAXLEN) { + libssh2_channel_flush(channel); + sftp->partial_size_len = 0; return _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_PACKET_EXCEEDED, "SFTP packet too large"); + } + + if(sftp->partial_len == 0) + return _libssh2_error(session, + LIBSSH2_ERROR_ALLOC, + "Unable to allocate empty SFTP packet"); _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Data begin - Packet Length: %lu", sftp->partial_len); packet = LIBSSH2_ALLOC(session, sftp->partial_len); - if (!packet) + if(!packet) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate SFTP packet"); sftp->partial_size_len = 0; @@ -364,7 +377,8 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) if(sftp->partial_len > recv_window) { /* ask for twice the data amount we need at once */ rc = _libssh2_channel_receive_window_adjust(channel, - sftp->partial_len*2, + sftp->partial_len + * 2, 1, NULL); /* store the state so that we continue with the correct operation at next invoke */ @@ -378,13 +392,13 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) } /* Read as much of the packet as we can */ - while (sftp->partial_len > sftp->partial_received) { + while(sftp->partial_len > sftp->partial_received) { rc = _libssh2_channel_read(channel, 0, (char *)&packet[sftp->partial_received], sftp->partial_len - sftp->partial_received); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { /* * We received EAGAIN, save what we have and return EAGAIN to * the caller. Set 'partial_packet' so that this function @@ -393,7 +407,7 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) sftp->packet_state = libssh2_NB_state_sent1; return rc; } - else if (rc < 0) { + else if(rc < 0) { LIBSSH2_FREE(session, packet); sftp->partial_packet = NULL; return _libssh2_error(session, rc, @@ -408,7 +422,7 @@ sftp_packet_read(LIBSSH2_SFTP *sftp) so we take a copy of the packet type before we call it. 
*/ packet_type = packet[0]; rc = sftp_packet_add(sftp, packet, sftp->partial_len); - if (rc) { + if(rc) { LIBSSH2_FREE(session, packet); return rc; } @@ -477,7 +491,7 @@ sftp_packet_ask(LIBSSH2_SFTP *sftp, unsigned char packet_type, /* Special consideration when getting VERSION packet */ - while (packet) { + while(packet) { if((packet->data[0] == packet_type) && ((packet_type == SSH_FXP_VERSION) || (packet->request_id == request_id))) { @@ -504,31 +518,45 @@ sftp_packet_ask(LIBSSH2_SFTP *sftp, unsigned char packet_type, static int sftp_packet_require(LIBSSH2_SFTP *sftp, unsigned char packet_type, uint32_t request_id, unsigned char **data, - size_t *data_len) + size_t *data_len, size_t required_size) { LIBSSH2_SESSION *session = sftp->channel->session; int rc; + if(data == NULL || data_len == NULL || required_size == 0) { + return LIBSSH2_ERROR_BAD_USE; + } + _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Requiring packet %d id %ld", (int) packet_type, request_id); - if (sftp_packet_ask(sftp, packet_type, request_id, data, data_len) == 0) { + if(sftp_packet_ask(sftp, packet_type, request_id, data, data_len) == 0) { /* The right packet was available in the packet brigade */ _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Got %d", (int) packet_type); + + if (*data_len < required_size) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + return LIBSSH2_ERROR_NONE; } - while (session->socket_state == LIBSSH2_SOCKET_CONNECTED) { + while(session->socket_state == LIBSSH2_SOCKET_CONNECTED) { rc = sftp_packet_read(sftp); - if (rc < 0) + if(rc < 0) return rc; /* data was read, check the queue again */ - if (!sftp_packet_ask(sftp, packet_type, request_id, data, data_len)) { + if(!sftp_packet_ask(sftp, packet_type, request_id, data, data_len)) { /* The right packet was available in the packet brigade */ _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Got %d", (int) packet_type); + + if (*data_len < required_size) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + return LIBSSH2_ERROR_NONE; } 
} @@ -544,42 +572,53 @@ static int sftp_packet_requirev(LIBSSH2_SFTP *sftp, int num_valid_responses, const unsigned char *valid_responses, uint32_t request_id, unsigned char **data, - size_t *data_len) + size_t *data_len, size_t required_size) { int i; int rc; + if(data == NULL || data_len == NULL || required_size == 0) { + return LIBSSH2_ERROR_BAD_USE; + } + /* If no timeout is active, start a new one */ - if (sftp->requirev_start == 0) + if(sftp->requirev_start == 0) sftp->requirev_start = time(NULL); - while (sftp->channel->session->socket_state == LIBSSH2_SOCKET_CONNECTED) { + while(sftp->channel->session->socket_state == LIBSSH2_SOCKET_CONNECTED) { for(i = 0; i < num_valid_responses; i++) { - if (sftp_packet_ask(sftp, valid_responses[i], request_id, + if(sftp_packet_ask(sftp, valid_responses[i], request_id, data, data_len) == 0) { /* * Set to zero before all returns to say * the timeout is not active */ sftp->requirev_start = 0; + + if (*data_len < required_size) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + return LIBSSH2_ERROR_NONE; } } rc = sftp_packet_read(sftp); - if ((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) { + if((rc < 0) && (rc != LIBSSH2_ERROR_EAGAIN)) { sftp->requirev_start = 0; return rc; - } else if (rc <= 0) { + } + else if(rc <= 0) { /* prevent busy-looping */ long left = - LIBSSH2_READ_TIMEOUT - (long)(time(NULL) - sftp->requirev_start); + LIBSSH2_READ_TIMEOUT - + (long)(time(NULL) - sftp->requirev_start); - if (left <= 0) { + if(left <= 0) { sftp->requirev_start = 0; return LIBSSH2_ERROR_TIMEOUT; } - else if (rc == LIBSSH2_ERROR_EAGAIN) { + else if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } } @@ -605,27 +644,27 @@ sftp_attr2bin(unsigned char *p, const LIBSSH2_SFTP_ATTRIBUTES * attrs) /* TODO: When we add SFTP4+ functionality flag_mask can get additional bits */ - if (!attrs) { + if(!attrs) { _libssh2_htonu32(s, 0); return 4; } _libssh2_store_u32(&s, attrs->flags & flag_mask); - if (attrs->flags & LIBSSH2_SFTP_ATTR_SIZE) { + if(attrs->flags 
& LIBSSH2_SFTP_ATTR_SIZE) { _libssh2_store_u64(&s, attrs->filesize); } - if (attrs->flags & LIBSSH2_SFTP_ATTR_UIDGID) { + if(attrs->flags & LIBSSH2_SFTP_ATTR_UIDGID) { _libssh2_store_u32(&s, attrs->uid); _libssh2_store_u32(&s, attrs->gid); } - if (attrs->flags & LIBSSH2_SFTP_ATTR_PERMISSIONS) { + if(attrs->flags & LIBSSH2_SFTP_ATTR_PERMISSIONS) { _libssh2_store_u32(&s, attrs->permissions); } - if (attrs->flags & LIBSSH2_SFTP_ATTR_ACMODTIME) { + if(attrs->flags & LIBSSH2_SFTP_ATTR_ACMODTIME) { _libssh2_store_u32(&s, attrs->atime); _libssh2_store_u32(&s, attrs->mtime); } @@ -636,39 +675,57 @@ sftp_attr2bin(unsigned char *p, const LIBSSH2_SFTP_ATTRIBUTES * attrs) /* sftp_bin2attr */ static int -sftp_bin2attr(LIBSSH2_SFTP_ATTRIBUTES * attrs, const unsigned char *p) +sftp_bin2attr(LIBSSH2_SFTP_ATTRIBUTES *attrs, const unsigned char *p, + size_t data_len) { - const unsigned char *s = p; + struct string_buf buf; + uint32_t flags = 0; + buf.data = (unsigned char *)p; + buf.dataptr = buf.data; + buf.len = data_len; - memset(attrs, 0, sizeof(LIBSSH2_SFTP_ATTRIBUTES)); - attrs->flags = _libssh2_ntohu32(s); - s += 4; + if(_libssh2_get_u32(&buf, &flags) != 0) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + attrs->flags = flags; - if (attrs->flags & LIBSSH2_SFTP_ATTR_SIZE) { - attrs->filesize = _libssh2_ntohu64(s); - s += 8; + if(attrs->flags & LIBSSH2_SFTP_ATTR_SIZE) { + if(_libssh2_get_u64(&buf, &(attrs->filesize)) != 0) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } } - if (attrs->flags & LIBSSH2_SFTP_ATTR_UIDGID) { - attrs->uid = _libssh2_ntohu32(s); - s += 4; - attrs->gid = _libssh2_ntohu32(s); - s += 4; + if(attrs->flags & LIBSSH2_SFTP_ATTR_UIDGID) { + uint32_t uid = 0; + uint32_t gid = 0; + if(_libssh2_get_u32(&buf, &uid) != 0 || + _libssh2_get_u32(&buf, &gid) != 0) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + attrs->uid = uid; + attrs->gid = gid; } - if (attrs->flags & LIBSSH2_SFTP_ATTR_PERMISSIONS) { - attrs->permissions = _libssh2_ntohu32(s); - s += 4; + 
if(attrs->flags & LIBSSH2_SFTP_ATTR_PERMISSIONS) { + uint32_t permissions; + if(_libssh2_get_u32(&buf, &permissions) != 0) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + attrs->permissions = permissions; } - if (attrs->flags & LIBSSH2_SFTP_ATTR_ACMODTIME) { - attrs->atime = _libssh2_ntohu32(s); - s += 4; - attrs->mtime = _libssh2_ntohu32(s); - s += 4; + if(attrs->flags & LIBSSH2_SFTP_ATTR_ACMODTIME) { + uint32_t atime; + uint32_t mtime; + if(_libssh2_get_u32(&buf, &atime) != 0 || + _libssh2_get_u32(&buf, &mtime) != 0) { + return LIBSSH2_ERROR_BUFFER_TOO_SMALL; + } + attrs->atime = atime; + attrs->mtime = mtime; } - return (s - p); + return (buf.dataptr - buf.data); } /* ************ @@ -688,12 +745,12 @@ LIBSSH2_CHANNEL_CLOSE_FUNC(libssh2_sftp_dtor) (void) channel; /* Free the partial packet storage for sftp_packet_read */ - if (sftp->partial_packet) { + if(sftp->partial_packet) { LIBSSH2_FREE(session, sftp->partial_packet); } /* Free the packet storage for _libssh2_sftp_packet_readdir */ - if (sftp->readdir_packet) { + if(sftp->readdir_packet) { LIBSSH2_FREE(session, sftp->readdir_packet); } @@ -707,12 +764,14 @@ LIBSSH2_CHANNEL_CLOSE_FUNC(libssh2_sftp_dtor) */ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) { - unsigned char *data, *s; + unsigned char *data; size_t data_len; ssize_t rc; LIBSSH2_SFTP *sftp_handle; + struct string_buf buf; + unsigned char *endp; - if (session->sftpInit_state == libssh2_NB_state_idle) { + if(session->sftpInit_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Initializing SFTP subsystem"); @@ -735,13 +794,13 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) sftp_handle = session->sftpInit_sftp; - if (session->sftpInit_state == libssh2_NB_state_created) { + if(session->sftpInit_state == libssh2_NB_state_created) { session->sftpInit_channel = _libssh2_channel_open(session, "session", sizeof("session") - 1, LIBSSH2_CHANNEL_WINDOW_DEFAULT, LIBSSH2_CHANNEL_PACKET_DEFAULT, NULL, 0); - if 
(!session->sftpInit_channel) { - if (libssh2_session_last_errno(session) == LIBSSH2_ERROR_EAGAIN) { + if(!session->sftpInit_channel) { + if(libssh2_session_last_errno(session) == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block starting up channel"); } @@ -756,16 +815,18 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) session->sftpInit_state = libssh2_NB_state_sent; } - if (session->sftpInit_state == libssh2_NB_state_sent) { + if(session->sftpInit_state == libssh2_NB_state_sent) { int ret = _libssh2_channel_process_startup(session->sftpInit_channel, "subsystem", - sizeof("subsystem") - 1, "sftp", + sizeof("subsystem") - 1, + "sftp", strlen("sftp")); - if (ret == LIBSSH2_ERROR_EAGAIN) { + if(ret == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block to request SFTP subsystem"); return NULL; - } else if (ret) { + } + else if(ret) { _libssh2_error(session, LIBSSH2_ERROR_CHANNEL_FAILURE, "Unable to request SFTP subsystem"); goto sftp_init_error; @@ -774,10 +835,10 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) session->sftpInit_state = libssh2_NB_state_sent1; } - if (session->sftpInit_state == libssh2_NB_state_sent1) { + if(session->sftpInit_state == libssh2_NB_state_sent1) { rc = _libssh2_channel_extended_data(session->sftpInit_channel, - LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE); - if (rc == LIBSSH2_ERROR_EAGAIN) { + LIBSSH2_CHANNEL_EXTENDED_DATA_IGNORE); + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting handle extended data"); return NULL; @@ -786,7 +847,7 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) sftp_handle = session->sftpInit_sftp = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_SFTP)); - if (!sftp_handle) { + if(!sftp_handle) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate a new SFTP structure"); goto sftp_init_error; @@ -800,19 +861,20 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) 
session->sftpInit_sent = 0; /* nothing's sent yet */ _libssh2_debug(session, LIBSSH2_TRACE_SFTP, - "Sending FXP_INIT packet advertising version %d support", + "Sending FXP_INIT packet advertising " + "version %d support", (int) LIBSSH2_SFTP_VERSION); session->sftpInit_state = libssh2_NB_state_sent2; } - if (session->sftpInit_state == libssh2_NB_state_sent2) { + if(session->sftpInit_state == libssh2_NB_state_sent2) { /* sent off what's left of the init buffer to send */ rc = _libssh2_channel_write(session->sftpInit_channel, 0, session->sftpInit_buffer + session->sftpInit_sent, 9 - session->sftpInit_sent); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block sending SSH_FXP_INIT"); return NULL; @@ -835,25 +897,38 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) } rc = sftp_packet_require(sftp_handle, SSH_FXP_VERSION, - 0, &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + 0, &data, &data_len, 5); + if(rc == LIBSSH2_ERROR_EAGAIN) { + _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block receiving SSH_FXP_VERSION"); return NULL; - else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Invalid SSH_FXP_VERSION response"); + goto sftp_init_error; + } + else if(rc) { _libssh2_error(session, rc, "Timeout waiting for response from SFTP subsystem"); goto sftp_init_error; } - if (data_len < 5) { - _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, - "Invalid SSH_FXP_VERSION response"); + + buf.data = data; + buf.dataptr = buf.data + 1; + buf.len = data_len; + endp = &buf.data[data_len]; + + if(_libssh2_get_u32(&buf, &(sftp_handle->version)) != 0) { LIBSSH2_FREE(session, data); + rc = LIBSSH2_ERROR_BUFFER_TOO_SMALL; goto sftp_init_error; } - s = data + 1; - sftp_handle->version = _libssh2_ntohu32(s); - s += 4; - if (sftp_handle->version > 
LIBSSH2_SFTP_VERSION) { + if(sftp_handle->version > LIBSSH2_SFTP_VERSION) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Truncating remote SFTP version from %lu", sftp_handle->version); @@ -862,20 +937,22 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Enabling SFTP version %lu compatibility", sftp_handle->version); - while (s < (data + data_len)) { - size_t extname_len, extdata_len; - - extname_len = _libssh2_ntohu32(s); - s += 4; - /* the extension name starts here */ - s += extname_len; + while(buf.dataptr < endp) { + unsigned char *extname, *extdata; - extdata_len = _libssh2_ntohu32(s); - s += 4; - - /* TODO: Actually process extensions */ - s += extdata_len; + if(_libssh2_get_string(&buf, &extname, NULL)) { + LIBSSH2_FREE(session, data); + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short when extracting extname"); + goto sftp_init_error; + } + if(_libssh2_get_string(&buf, &extdata, NULL)) { + LIBSSH2_FREE(session, data); + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "Data too short when extracting extdata"); + goto sftp_init_error; + } } LIBSSH2_FREE(session, data); @@ -895,10 +972,10 @@ static LIBSSH2_SFTP *sftp_init(LIBSSH2_SESSION *session) return sftp_handle; sftp_init_error: - while (_libssh2_channel_free(session->sftpInit_channel) == + while(_libssh2_channel_free(session->sftpInit_channel) == LIBSSH2_ERROR_EAGAIN); session->sftpInit_channel = NULL; - if (session->sftpInit_sftp) { + if(session->sftpInit_sftp) { LIBSSH2_FREE(session, session->sftpInit_sftp); session->sftpInit_sftp = NULL; } @@ -941,55 +1018,55 @@ sftp_shutdown(LIBSSH2_SFTP *sftp) /* * Make sure all memory used in the state variables are free */ - if (sftp->partial_packet) { + if(sftp->partial_packet) { LIBSSH2_FREE(session, sftp->partial_packet); sftp->partial_packet = NULL; } - if (sftp->open_packet) { + if(sftp->open_packet) { LIBSSH2_FREE(session, sftp->open_packet); sftp->open_packet = NULL; 
} - if (sftp->readdir_packet) { + if(sftp->readdir_packet) { LIBSSH2_FREE(session, sftp->readdir_packet); sftp->readdir_packet = NULL; } - if (sftp->fstat_packet) { + if(sftp->fstat_packet) { LIBSSH2_FREE(session, sftp->fstat_packet); sftp->fstat_packet = NULL; } - if (sftp->unlink_packet) { + if(sftp->unlink_packet) { LIBSSH2_FREE(session, sftp->unlink_packet); sftp->unlink_packet = NULL; } - if (sftp->rename_packet) { + if(sftp->rename_packet) { LIBSSH2_FREE(session, sftp->rename_packet); sftp->rename_packet = NULL; } - if (sftp->fstatvfs_packet) { + if(sftp->fstatvfs_packet) { LIBSSH2_FREE(session, sftp->fstatvfs_packet); sftp->fstatvfs_packet = NULL; } - if (sftp->statvfs_packet) { + if(sftp->statvfs_packet) { LIBSSH2_FREE(session, sftp->statvfs_packet); sftp->statvfs_packet = NULL; } - if (sftp->mkdir_packet) { + if(sftp->mkdir_packet) { LIBSSH2_FREE(session, sftp->mkdir_packet); sftp->mkdir_packet = NULL; } - if (sftp->rmdir_packet) { + if(sftp->rmdir_packet) { LIBSSH2_FREE(session, sftp->rmdir_packet); sftp->rmdir_packet = NULL; } - if (sftp->stat_packet) { + if(sftp->stat_packet) { LIBSSH2_FREE(session, sftp->stat_packet); sftp->stat_packet = NULL; } - if (sftp->symlink_packet) { + if(sftp->symlink_packet) { LIBSSH2_FREE(session, sftp->symlink_packet); sftp->symlink_packet = NULL; } - if (sftp->fsync_packet) { + if(sftp->fsync_packet) { LIBSSH2_FREE(session, sftp->fsync_packet); sftp->fsync_packet = NULL; } @@ -1038,16 +1115,17 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, ssize_t rc; int open_file = (open_type == LIBSSH2_SFTP_OPENFILE)?1:0; - if (sftp->open_state == libssh2_NB_state_idle) { + if(sftp->open_state == libssh2_NB_state_idle) { /* packet_len(4) + packet_type(1) + request_id(4) + filename_len(4) + flags(4) */ sftp->open_packet_len = filename_len + 13 + - (open_file? (4 + sftp_attrsize(LIBSSH2_SFTP_ATTR_PERMISSIONS)) : 0); + (open_file? (4 + + sftp_attrsize(LIBSSH2_SFTP_ATTR_PERMISSIONS)) : 0); /* surprise! 
this starts out with nothing sent */ sftp->open_packet_sent = 0; s = sftp->open_packet = LIBSSH2_ALLOC(session, sftp->open_packet_len); - if (!sftp->open_packet) { + if(!sftp->open_packet) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_OPEN or " "FXP_OPENDIR packet"); @@ -1064,7 +1142,7 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, _libssh2_store_u32(&s, sftp->open_request_id); _libssh2_store_str(&s, filename, filename_len); - if (open_file) { + if(open_file) { _libssh2_store_u32(&s, flags); s += sftp_attr2bin(s, &attrs); } @@ -1075,14 +1153,15 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, sftp->open_state = libssh2_NB_state_created; } - if (sftp->open_state == libssh2_NB_state_created) { + if(sftp->open_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->open_packet+ sftp->open_packet_sent, sftp->open_packet_len - sftp->open_packet_sent); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, - "Would block sending FXP_OPEN or FXP_OPENDIR command"); + "Would block sending FXP_OPEN or " + "FXP_OPENDIR command"); return NULL; } else if(rc < 0) { @@ -1105,21 +1184,29 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, } } - if (sftp->open_state == libssh2_NB_state_sent) { + if(sftp->open_state == libssh2_NB_state_sent) { size_t data_len; unsigned char *data; static const unsigned char fopen_responses[2] = { SSH_FXP_HANDLE, SSH_FXP_STATUS }; rc = sftp_packet_requirev(sftp, 2, fopen_responses, sftp->open_request_id, &data, - &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data_len, 1); + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block waiting for status message"); return NULL; } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Response too small"); + return NULL; 
+ } sftp->open_state = libssh2_NB_state_idle; - if (rc) { + if(rc) { _libssh2_error(session, rc, "Timeout waiting for status message"); return NULL; } @@ -1128,7 +1215,7 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, a fine response while STATUS means error. It seems though that at times we get an SSH_FX_OK back in a STATUS, followed the "real" HANDLE so we need to properly deal with that. */ - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { int badness = 1; if(data_len < 9) { @@ -1141,19 +1228,28 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, sftp->last_errno = _libssh2_ntohu32(data + 5); if(LIBSSH2_FX_OK == sftp->last_errno) { - _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "got HANDLE FXOK!"); + _libssh2_debug(session, LIBSSH2_TRACE_SFTP, + "got HANDLE FXOK!"); LIBSSH2_FREE(session, data); /* silly situation, but check for a HANDLE */ rc = sftp_packet_require(sftp, SSH_FXP_HANDLE, sftp->open_request_id, &data, - &data_len); + &data_len, 10); if(rc == LIBSSH2_ERROR_EAGAIN) { /* go back to sent state and wait for something else */ sftp->open_state = libssh2_NB_state_sent; return NULL; } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Too small FXP_HANDLE"); + return NULL; + } else if(!rc) /* we got the handle so this is not a bad situation */ badness = 0; @@ -1162,7 +1258,8 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, if(badness) { _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "Failed opening remote file"); - _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "got FXP_STATUS %d", + _libssh2_debug(session, LIBSSH2_TRACE_SFTP, + "got FXP_STATUS %d", sftp->last_errno); LIBSSH2_FREE(session, data); return NULL; @@ -1177,7 +1274,7 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, } fp = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_SFTP_HANDLE)); - if (!fp) { + if(!fp) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, 
"Unable to allocate new SFTP handle structure"); LIBSSH2_FREE(session, data); @@ -1187,7 +1284,7 @@ sftp_open(LIBSSH2_SFTP *sftp, const char *filename, LIBSSH2_SFTP_HANDLE_DIR; fp->handle_len = _libssh2_ntohu32(data + 5); - if (fp->handle_len > SFTP_HANDLE_MAXLEN) + if(fp->handle_len > SFTP_HANDLE_MAXLEN) /* SFTP doesn't allow handles longer than 256 characters */ fp->handle_len = SFTP_HANDLE_MAXLEN; @@ -1283,7 +1380,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, and second phases on the next call and resume sending. */ - switch (sftp->read_state) { + switch(sftp->read_state) { case libssh2_NB_state_idle: /* Some data may already have been read from the server in the @@ -1307,9 +1404,10 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, return copy; } - if (filep->eof) { + if(filep->eof) { return 0; - } else { + } + else { /* We allow a number of bytes being requested at any given time without having been acked - until we reach EOF. */ @@ -1330,7 +1428,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, /* 'count' is how much more data to ask for, and 'already' is how much data that already has been asked for but not yet returned. - Specificly, 'count' means how much data that have or will be + Specifically, 'count' means how much data that have or will be asked for by the nodes that are already added to the linked list. Some of those read requests may not actually have been sent off successfully yet. 
@@ -1360,7 +1458,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, at next call */ assert(rc != LIBSSH2_ERROR_EAGAIN || !filep->data_left); assert(rc != LIBSSH2_ERROR_EAGAIN || !filep->eof); - if (rc) + if(rc) return rc; } } @@ -1374,14 +1472,14 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, uint32_t request_id; uint32_t size = count; - if (size < buffer_size) + if(size < buffer_size) size = buffer_size; - if (size > MAX_SFTP_READ_SIZE) + if(size > MAX_SFTP_READ_SIZE) size = MAX_SFTP_READ_SIZE; chunk = LIBSSH2_ALLOC(session, packet_len + sizeof(struct sftp_pipeline_chunk)); - if (!chunk) + if(!chunk) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "malloc fail for FXP_WRITE"); @@ -1404,13 +1502,13 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, /* add this new entry LAST in the list */ _libssh2_list_add(&handle->packet_list, &chunk->node); - count -= MIN(size,count); /* deduct the size we used, as we might - * have to create more packets */ + count -= MIN(size, count); /* deduct the size we used, as we might + * have to create more packets */ _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "read request id %d sent (offset: %d, size: %d)", request_id, (int)chunk->offset, (int)chunk->len); } - + /* FALL-THROUGH */ case libssh2_NB_state_sent: sftp->read_state = libssh2_NB_state_idle; @@ -1438,9 +1536,10 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, /* We still have data left to send for this chunk. * If there is at least one completely sent chunk, * we can get out of this loop and start reading. 
*/ - if (chunk != _libssh2_list_first(&handle->packet_list)) { + if(chunk != _libssh2_list_first(&handle->packet_list)) { break; - } else { + } + else { continue; } } @@ -1449,6 +1548,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, /* move on to the next chunk with data to send */ chunk = _libssh2_list_next(&chunk->node); } + /* FALL-THROUGH */ case libssh2_NB_state_sent2: @@ -1470,9 +1570,10 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, if(chunk->lefttosend) { /* if the chunk still has data left to send, we shouldn't wait for an ACK for it just yet */ - if (bytes_in_buffer > 0) { + if(bytes_in_buffer > 0) { return bytes_in_buffer; - } else { + } + else { /* we should never reach this point */ return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "sftp_read() internal error"); @@ -1480,15 +1581,21 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, } rc = sftp_packet_requirev(sftp, 2, read_responses, - chunk->request_id, &data, &data_len); - - if (rc==LIBSSH2_ERROR_EAGAIN && bytes_in_buffer != 0) { + chunk->request_id, &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN && bytes_in_buffer != 0) { /* do not return EAGAIN if we have already * written data into the buffer */ return bytes_in_buffer; } - if (rc < 0) { + if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Response too small"); + } + else if(rc < 0) { sftp->read_state = libssh2_NB_state_sent2; return rc; } @@ -1498,7 +1605,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, * FX_EOF when we reach the end of the file. 
*/ - switch (data[0]) { + switch(data[0]) { case SSH_FXP_STATUS: /* remove the chunk we just processed */ @@ -1512,7 +1619,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, rc32 = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (rc32 == LIBSSH2_FX_EOF) { + if(rc32 == LIBSSH2_FX_EOF) { filep->eof = TRUE; return bytes_in_buffer; } @@ -1524,7 +1631,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, break; case SSH_FXP_DATA: - if (chunk->offset != filep->offset) { + if(chunk->offset != filep->offset) { /* This could happen if the server returns less bytes than requested, which shouldn't happen for normal files. See: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 @@ -1535,7 +1642,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, } rc32 = _libssh2_ntohu32(data + 5); - if (rc32 > (data_len - 9)) + if(rc32 > (data_len - 9)) return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol badness"); @@ -1589,9 +1696,10 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, /* check if we have space left in the buffer * and either continue to the next chunk or stop */ - if (bytes_in_buffer < buffer_size) { + if(bytes_in_buffer < buffer_size) { chunk = next; - } else { + } + else { chunk = NULL; } @@ -1603,7 +1711,7 @@ static ssize_t sftp_read(LIBSSH2_SFTP_HANDLE * handle, char *buffer, } } - if (bytes_in_buffer > 0) + if(bytes_in_buffer > 0) return bytes_in_buffer; break; @@ -1652,8 +1760,8 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, SSH_FXP_NAME, SSH_FXP_STATUS }; ssize_t retcode; - if (sftp->readdir_state == libssh2_NB_state_idle) { - if (handle->u.dir.names_left) { + if(sftp->readdir_state == libssh2_NB_state_idle) { + if(handle->u.dir.names_left) { /* * A prior request returned more than one directory entry, * feed it back from the buffer @@ -1663,29 +1771,53 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, 
char *buffer, size_t real_filename_len; size_t filename_len; size_t longentry_len; - - s = (unsigned char *) handle->u.dir.next_name; - real_filename_len = _libssh2_ntohu32(s); - - s += 4; + size_t names_packet_len = handle->u.dir.names_packet_len; + int attr_len = 0; + + if(names_packet_len >= 4) { + s = (unsigned char *) handle->u.dir.next_name; + real_filename_len = _libssh2_ntohu32(s); + s += 4; + names_packet_len -= 4; + } + else { + filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; + goto end; + } filename_len = real_filename_len; - if (filename_len >= buffer_maxlen) { + if(filename_len >= buffer_maxlen) { filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; goto end; } - memcpy(buffer, s, filename_len); - buffer[filename_len] = '\0'; /* zero terminate */ - s += real_filename_len; + if(buffer_maxlen >= filename_len && names_packet_len >= + filename_len) { + memcpy(buffer, s, filename_len); + buffer[filename_len] = '\0'; /* zero terminate */ + s += real_filename_len; + names_packet_len -= real_filename_len; + } + else { + filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; + goto end; + } - real_longentry_len = _libssh2_ntohu32(s); - s += 4; + if(names_packet_len >= 4) { + real_longentry_len = _libssh2_ntohu32(s); + s += 4; + names_packet_len -= 4; + } + else { + filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; + goto end; + } - if (longentry && (longentry_maxlen>1)) { + if(longentry && (longentry_maxlen>1)) { longentry_len = real_longentry_len; - if (longentry_len >= longentry_maxlen) { + if(longentry_len >= longentry_maxlen || + longentry_len > names_packet_len) { filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; goto end; } @@ -1693,17 +1825,36 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, memcpy(longentry, s, longentry_len); longentry[longentry_len] = '\0'; /* zero terminate */ } - s += real_longentry_len; - if (attrs) + if(real_longentry_len <= names_packet_len) { + s += real_longentry_len; + 
names_packet_len -= real_longentry_len; + } + else { + filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; + goto end; + } + + if(attrs) memset(attrs, 0, sizeof(LIBSSH2_SFTP_ATTRIBUTES)); - s += sftp_bin2attr(attrs ? attrs : &attrs_dummy, s); + attr_len = sftp_bin2attr(attrs ? attrs : &attrs_dummy, s, + names_packet_len); + + if(attr_len >= 0) { + s += attr_len; + names_packet_len -= attr_len; + } + else { + filename_len = (size_t)LIBSSH2_ERROR_BUFFER_TOO_SMALL; + goto end; + } handle->u.dir.next_name = (char *) s; + handle->u.dir.names_packet_len = names_packet_len; end: - if ((--handle->u.dir.names_left) == 0) + if((--handle->u.dir.names_left) == 0) LIBSSH2_FREE(session, handle->u.dir.names_packet); _libssh2_debug(session, LIBSSH2_TRACE_SFTP, @@ -1715,7 +1866,7 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, /* Request another entry(entries?) */ s = sftp->readdir_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->readdir_packet) + if(!sftp->readdir_packet) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "FXP_READDIR packet"); @@ -1729,15 +1880,15 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, sftp->readdir_state = libssh2_NB_state_created; } - if (sftp->readdir_state == libssh2_NB_state_created) { + if(sftp->readdir_state == libssh2_NB_state_created) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Reading entries from directory handle"); retcode = _libssh2_channel_write(channel, 0, sftp->readdir_packet, packet_len); - if (retcode == LIBSSH2_ERROR_EAGAIN) { + if(retcode == LIBSSH2_ERROR_EAGAIN) { return retcode; } - else if ((ssize_t)packet_len != retcode) { + else if((ssize_t)packet_len != retcode) { LIBSSH2_FREE(session, sftp->readdir_packet); sftp->readdir_packet = NULL; sftp->readdir_state = libssh2_NB_state_idle; @@ -1753,19 +1904,26 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, retcode = sftp_packet_requirev(sftp, 2, read_responses, 
sftp->readdir_request_id, &data, - &data_len); - if (retcode == LIBSSH2_ERROR_EAGAIN) + &data_len, 9); + if(retcode == LIBSSH2_ERROR_EAGAIN) return retcode; - else if (retcode) { + else if(retcode == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Status message too short"); + } + else if(retcode) { sftp->readdir_state = libssh2_NB_state_idle; return _libssh2_error(session, retcode, "Timeout waiting for status message"); } - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_EOF) { + if(retcode == LIBSSH2_FX_EOF) { sftp->readdir_state = libssh2_NB_state_idle; return 0; } @@ -1782,7 +1940,7 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, num_names = _libssh2_ntohu32(data + 5); _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "%lu entries returned", num_names); - if (!num_names) { + if(!num_names) { LIBSSH2_FREE(session, data); return 0; } @@ -1790,6 +1948,7 @@ static ssize_t sftp_readdir(LIBSSH2_SFTP_HANDLE *handle, char *buffer, handle->u.dir.names_left = num_names; handle->u.dir.names_packet = data; handle->u.dir.next_name = (char *) data + 9; + handle->u.dir.names_packet_len = data_len - 9; /* use the name popping mechanism from the start of the function */ return sftp_readdir(handle, buffer, buffer_maxlen, longentry, @@ -1873,14 +2032,15 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, default: case libssh2_NB_state_idle: - /* Number of bytes sent off that haven't been acked and therefor we + /* Number of bytes sent off that haven't been acked and therefore we will get passed in here again. Also, add up the number of bytes that actually already have been acked but we haven't been able to return as such yet, so we will get that data as well passed in here again. 
*/ - already = (size_t) (handle->u.file.offset_sent - handle->u.file.offset)+ + already = (size_t) (handle->u.file.offset_sent - + handle->u.file.offset)+ handle->u.file.acked; if(count >= already) { @@ -1905,7 +2065,7 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, chunk = LIBSSH2_ALLOC(session, packet_len + sizeof(struct sftp_pipeline_chunk)); - if (!chunk) + if(!chunk) return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "malloc fail for FXP_WRITE"); @@ -1933,8 +2093,8 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, to create more packets */ } - /* move through the WRITE packets that haven't been sent and send as many - as possible - remember that we don't block */ + /* move through the WRITE packets that haven't been sent and send as + many as possible - remember that we don't block */ chunk = _libssh2_list_first(&handle->packet_list); while(chunk) { @@ -1981,9 +2141,16 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, /* we check the packets in order */ rc = sftp_packet_require(sftp, SSH_FXP_STATUS, - chunk->request_id, &data, &data_len); - if (rc < 0) { - if (rc == LIBSSH2_ERROR_EAGAIN) + chunk->request_id, &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "FXP write packet too short"); + } + else if(rc < 0) { + if(rc == LIBSSH2_ERROR_EAGAIN) sftp->write_state = libssh2_NB_state_sent; return rc; } @@ -1992,7 +2159,7 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, LIBSSH2_FREE(session, data); sftp->last_errno = retcode; - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { acked += chunk->len; /* number of payload data that was acked here */ @@ -2012,7 +2179,8 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, /* since we return error now, the application will not get any 
outstanding data acked, so we need to rewind the offset to - where the application knows it has reached with acked data */ + where the application knows it has reached with acked + data */ handle->u.file.offset -= handle->u.file.acked; /* then reset the offset_sent to be the same as the offset */ @@ -2022,8 +2190,8 @@ static ssize_t sftp_write(LIBSSH2_SFTP_HANDLE *handle, const char *buffer, ack after an error */ handle->u.file.acked = 0; - /* the server returned an error for that written chunk, propagate - this back to our parent function */ + /* the server returned an error for that written chunk, + propagate this back to our parent function */ return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "FXP write failed"); } @@ -2082,11 +2250,11 @@ static int sftp_fsync(LIBSSH2_SFTP_HANDLE *handle) ssize_t rc; uint32_t retcode; - if (sftp->fsync_state == libssh2_NB_state_idle) { + if(sftp->fsync_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Issuing fsync command"); s = packet = LIBSSH2_ALLOC(session, packet_len); - if (!packet) { + if(!packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_EXTENDED " "packet"); @@ -2100,13 +2268,14 @@ static int sftp_fsync(LIBSSH2_SFTP_HANDLE *handle) _libssh2_store_str(&s, handle->handle, handle->handle_len); sftp->fsync_state = libssh2_NB_state_created; - } else { + } + else { packet = sftp->fsync_packet; } - if (sftp->fsync_state == libssh2_NB_state_created) { + if(sftp->fsync_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN || + if(rc == LIBSSH2_ERROR_EAGAIN || (0 <= rc && rc < (ssize_t)packet_len)) { sftp->fsync_packet = packet; return LIBSSH2_ERROR_EAGAIN; @@ -2115,7 +2284,7 @@ static int sftp_fsync(LIBSSH2_SFTP_HANDLE *handle) LIBSSH2_FREE(session, packet); sftp->fsync_packet = NULL; - if (rc < 0) { + if(rc < 0) { sftp->fsync_state = libssh2_NB_state_idle; return 
_libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "_libssh2_channel_write() failed"); @@ -2124,10 +2293,18 @@ static int sftp_fsync(LIBSSH2_SFTP_HANDLE *handle) } rc = sftp_packet_require(sftp, SSH_FXP_STATUS, - sftp->fsync_request_id, &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + sftp->fsync_request_id, &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP fsync packet too short"); + } + else if(rc) { sftp->fsync_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP EXTENDED REPLY"); @@ -2138,7 +2315,7 @@ static int sftp_fsync(LIBSSH2_SFTP_HANDLE *handle) retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode != LIBSSH2_FX_OK) { + if(retcode != LIBSSH2_FX_OK) { sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "fsync failed"); @@ -2182,11 +2359,11 @@ static int sftp_fstat(LIBSSH2_SFTP_HANDLE *handle, { SSH_FXP_ATTRS, SSH_FXP_STATUS }; ssize_t rc; - if (sftp->fstat_state == libssh2_NB_state_idle) { + if(sftp->fstat_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Issuing %s command", setstat ? 
"set-stat" : "stat"); s = sftp->fstat_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->fstat_packet) { + if(!sftp->fstat_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "FSTAT/FSETSTAT packet"); @@ -2198,20 +2375,20 @@ static int sftp_fstat(LIBSSH2_SFTP_HANDLE *handle, _libssh2_store_u32(&s, sftp->fstat_request_id); _libssh2_store_str(&s, handle->handle, handle->handle_len); - if (setstat) { + if(setstat) { s += sftp_attr2bin(s, attrs); } sftp->fstat_state = libssh2_NB_state_created; } - if (sftp->fstat_state == libssh2_NB_state_created) { + if(sftp->fstat_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->fstat_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } - else if ((ssize_t)packet_len != rc) { + else if((ssize_t)packet_len != rc) { LIBSSH2_FREE(session, sftp->fstat_packet); sftp->fstat_packet = NULL; sftp->fstat_state = libssh2_NB_state_idle; @@ -2227,10 +2404,17 @@ static int sftp_fstat(LIBSSH2_SFTP_HANDLE *handle, rc = sftp_packet_requirev(sftp, 2, fstat_responses, sftp->fstat_request_id, &data, - &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - else if (rc) { + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP fstat packet too short"); + } + else if(rc) { sftp->fstat_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Timeout waiting for status message"); @@ -2238,21 +2422,27 @@ static int sftp_fstat(LIBSSH2_SFTP_HANDLE *handle, sftp->fstat_state = libssh2_NB_state_idle; - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { uint32_t retcode; retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { return 0; - } else { + } + else 
{ sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol Error"); } } - sftp_bin2attr(attrs, data + 5); + if(sftp_bin2attr(attrs, data + 5, data_len - 5) < 0) { + LIBSSH2_FREE(session, data); + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Attributes too short in SFTP fstat"); + } + LIBSSH2_FREE(session, data); return 0; @@ -2290,7 +2480,7 @@ libssh2_sftp_seek64(LIBSSH2_SFTP_HANDLE *handle, libssh2_uint64_t offset) sftp_packetlist_flush(handle); /* free the left received buffered data */ - if (handle->u.file.data_left) { + if(handle->u.file.data_left) { LIBSSH2_FREE(handle->sftp->channel->session, handle->u.file.data); handle->u.file.data_left = handle->u.file.data_len = 0; handle->u.file.data = NULL; @@ -2391,15 +2581,16 @@ sftp_close_handle(LIBSSH2_SFTP_HANDLE *handle) unsigned char *s, *data = NULL; int rc = 0; - if (handle->close_state == libssh2_NB_state_idle) { + if(handle->close_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Closing handle"); s = handle->close_packet = LIBSSH2_ALLOC(session, packet_len); - if (!handle->close_packet) { + if(!handle->close_packet) { handle->close_state = libssh2_NB_state_idle; rc = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_CLOSE " "packet"); - } else { + } + else { _libssh2_store_u32(&s, packet_len - 4); *(s++) = SSH_FXP_CLOSE; @@ -2410,30 +2601,40 @@ sftp_close_handle(LIBSSH2_SFTP_HANDLE *handle) } } - if (handle->close_state == libssh2_NB_state_created) { + if(handle->close_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, handle->close_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((ssize_t)packet_len != rc) { + } + else if((ssize_t)packet_len != rc) { handle->close_state = libssh2_NB_state_idle; rc = _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send FXP_CLOSE command"); - } else 
+ } + else handle->close_state = libssh2_NB_state_sent; LIBSSH2_FREE(session, handle->close_packet); handle->close_packet = NULL; } - if (handle->close_state == libssh2_NB_state_sent) { + if(handle->close_state == libssh2_NB_state_sent) { rc = sftp_packet_require(sftp, SSH_FXP_STATUS, handle->close_request_id, &data, - &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + data = NULL; + _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Packet too short in FXP_CLOSE command"); + } + else if(rc) { _libssh2_error(session, rc, "Error waiting for status message"); } @@ -2446,11 +2647,12 @@ sftp_close_handle(LIBSSH2_SFTP_HANDLE *handle) happened for which we should have set an error code */ assert(rc); - } else { + } + else { int retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode != LIBSSH2_FX_OK) { + if(retcode != LIBSSH2_FX_OK) { sftp->last_errno = retcode; handle->close_state = libssh2_NB_state_idle; rc = _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, @@ -2461,11 +2663,11 @@ sftp_close_handle(LIBSSH2_SFTP_HANDLE *handle) /* remove this handle from the parent's list */ _libssh2_list_remove(&handle->node); - if ((handle->handle_type == LIBSSH2_SFTP_HANDLE_DIR) - && handle->u.dir.names_left) { - LIBSSH2_FREE(session, handle->u.dir.names_packet); + if(handle->handle_type == LIBSSH2_SFTP_HANDLE_DIR) { + if(handle->u.dir.names_left) + LIBSSH2_FREE(session, handle->u.dir.names_packet); } - else { + else if(handle->handle_type == LIBSSH2_SFTP_HANDLE_FILE) { if(handle->u.file.data) LIBSSH2_FREE(session, handle->u.file.data); } @@ -2510,10 +2712,10 @@ static int sftp_unlink(LIBSSH2_SFTP *sftp, const char *filename, unsigned char *s, *data; int rc; - if (sftp->unlink_state == libssh2_NB_state_idle) { + if(sftp->unlink_state == libssh2_NB_state_idle) { 
_libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Unlinking %s", filename); s = sftp->unlink_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->unlink_packet) { + if(!sftp->unlink_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_REMOVE " "packet"); @@ -2527,12 +2729,13 @@ static int sftp_unlink(LIBSSH2_SFTP *sftp, const char *filename, sftp->unlink_state = libssh2_NB_state_created; } - if (sftp->unlink_state == libssh2_NB_state_created) { + if(sftp->unlink_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->unlink_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((ssize_t)packet_len != rc) { + } + else if((ssize_t)packet_len != rc) { LIBSSH2_FREE(session, sftp->unlink_packet); sftp->unlink_packet = NULL; sftp->unlink_state = libssh2_NB_state_idle; @@ -2547,11 +2750,18 @@ static int sftp_unlink(LIBSSH2_SFTP *sftp, const char *filename, rc = sftp_packet_require(sftp, SSH_FXP_STATUS, sftp->unlink_request_id, &data, - &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; } - else if (rc) { + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP unlink packet too short"); + } + else if(rc) { sftp->unlink_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP STATUS"); @@ -2562,9 +2772,10 @@ static int sftp_unlink(LIBSSH2_SFTP *sftp, const char *filename, retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { return 0; - } else { + } + else { sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol Error"); @@ -2608,17 +2819,17 @@ static int sftp_rename(LIBSSH2_SFTP *sftp, const char 
*source_filename, unsigned char *data; ssize_t rc; - if (sftp->version < 2) { + if(sftp->version < 2) { return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "Server does not support RENAME"); } - if (sftp->rename_state == libssh2_NB_state_idle) { + if(sftp->rename_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Renaming %s to %s", source_filename, dest_filename); sftp->rename_s = sftp->rename_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->rename_packet) { + if(!sftp->rename_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_RENAME " "packet"); @@ -2632,18 +2843,19 @@ static int sftp_rename(LIBSSH2_SFTP *sftp, const char *source_filename, source_filename_len); _libssh2_store_str(&sftp->rename_s, dest_filename, dest_filename_len); - if (sftp->version >= 5) + if(sftp->version >= 5) _libssh2_store_u32(&sftp->rename_s, flags); sftp->rename_state = libssh2_NB_state_created; } - if (sftp->rename_state == libssh2_NB_state_created) { + if(sftp->rename_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->rename_packet, sftp->rename_s - sftp->rename_packet); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if ((ssize_t)packet_len != rc) { + } + else if((ssize_t)packet_len != rc) { LIBSSH2_FREE(session, sftp->rename_packet); sftp->rename_packet = NULL; sftp->rename_state = libssh2_NB_state_idle; @@ -2658,10 +2870,18 @@ static int sftp_rename(LIBSSH2_SFTP *sftp, const char *source_filename, rc = sftp_packet_require(sftp, SSH_FXP_STATUS, sftp->rename_request_id, &data, - &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP rename packet too short"); + } + else 
if(rc) { sftp->rename_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP STATUS"); @@ -2676,7 +2896,7 @@ static int sftp_rename(LIBSSH2_SFTP *sftp, const char *source_filename, /* now convert the SFTP error code to libssh2 return code or error message */ - switch (retcode) { + switch(retcode) { case LIBSSH2_FX_OK: retcode = LIBSSH2_ERROR_NONE; break; @@ -2740,11 +2960,11 @@ static int sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) static const unsigned char responses[2] = { SSH_FXP_EXTENDED_REPLY, SSH_FXP_STATUS }; - if (sftp->fstatvfs_state == libssh2_NB_state_idle) { + if(sftp->fstatvfs_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Getting file system statistics"); s = packet = LIBSSH2_ALLOC(session, packet_len); - if (!packet) { + if(!packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_EXTENDED " "packet"); @@ -2763,9 +2983,9 @@ static int sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) packet = sftp->fstatvfs_packet; } - if (sftp->fstatvfs_state == libssh2_NB_state_created) { + if(sftp->fstatvfs_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN || + if(rc == LIBSSH2_ERROR_EAGAIN || (0 <= rc && rc < (ssize_t)packet_len)) { sftp->fstatvfs_packet = packet; return LIBSSH2_ERROR_EAGAIN; @@ -2774,7 +2994,7 @@ static int sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) LIBSSH2_FREE(session, packet); sftp->fstatvfs_packet = NULL; - if (rc < 0) { + if(rc < 0) { sftp->fstatvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "_libssh2_channel_write() failed"); @@ -2783,17 +3003,25 @@ static int sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) } rc = sftp_packet_requirev(sftp, 2, responses, sftp->fstatvfs_request_id, - &data, &data_len); + &data, &data_len, 
9); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP rename packet too short"); + } + else if(rc) { sftp->fstatvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP EXTENDED REPLY"); } - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { int retcode = _libssh2_ntohu32(data + 5); sftp->fstatvfs_state = libssh2_NB_state_idle; LIBSSH2_FREE(session, data); @@ -2802,7 +3030,7 @@ static int sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) "SFTP Protocol Error"); } - if (data_len < 93) { + if(data_len < 93) { LIBSSH2_FREE(session, data); sftp->fstatvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, @@ -2842,7 +3070,8 @@ libssh2_sftp_fstatvfs(LIBSSH2_SFTP_HANDLE *handle, LIBSSH2_SFTP_STATVFS *st) int rc; if(!handle || !st) return LIBSSH2_ERROR_BAD_USE; - BLOCK_ADJUST(rc, handle->sftp->channel->session, sftp_fstatvfs(handle, st)); + BLOCK_ADJUST(rc, handle->sftp->channel->session, + sftp_fstatvfs(handle, st)); return rc; } @@ -2867,11 +3096,11 @@ static int sftp_statvfs(LIBSSH2_SFTP *sftp, const char *path, static const unsigned char responses[2] = { SSH_FXP_EXTENDED_REPLY, SSH_FXP_STATUS }; - if (sftp->statvfs_state == libssh2_NB_state_idle) { + if(sftp->statvfs_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Getting file system statistics of %s", path); s = packet = LIBSSH2_ALLOC(session, packet_len); - if (!packet) { + if(!packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_EXTENDED " "packet"); @@ -2890,9 +3119,9 @@ static int sftp_statvfs(LIBSSH2_SFTP *sftp, const char *path, packet = sftp->statvfs_packet; } - if (sftp->statvfs_state == 
libssh2_NB_state_created) { + if(sftp->statvfs_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN || + if(rc == LIBSSH2_ERROR_EAGAIN || (0 <= rc && rc < (ssize_t)packet_len)) { sftp->statvfs_packet = packet; return LIBSSH2_ERROR_EAGAIN; @@ -2901,7 +3130,7 @@ static int sftp_statvfs(LIBSSH2_SFTP *sftp, const char *path, LIBSSH2_FREE(session, packet); sftp->statvfs_packet = NULL; - if (rc < 0) { + if(rc < 0) { sftp->statvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "_libssh2_channel_write() failed"); @@ -2910,16 +3139,24 @@ static int sftp_statvfs(LIBSSH2_SFTP *sftp, const char *path, } rc = sftp_packet_requirev(sftp, 2, responses, sftp->statvfs_request_id, - &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP fstat packet too short"); + } + else if(rc) { sftp->statvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP EXTENDED REPLY"); } - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { int retcode = _libssh2_ntohu32(data + 5); sftp->statvfs_state = libssh2_NB_state_idle; LIBSSH2_FREE(session, data); @@ -2928,7 +3165,7 @@ static int sftp_statvfs(LIBSSH2_SFTP *sftp, const char *path, "SFTP Protocol Error"); } - if (data_len < 93) { + if(data_len < 93) { LIBSSH2_FREE(session, data); sftp->statvfs_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, @@ -2986,27 +3223,32 @@ static int sftp_mkdir(LIBSSH2_SFTP *sftp, const char *path, LIBSSH2_CHANNEL *channel = sftp->channel; LIBSSH2_SESSION *session = channel->session; LIBSSH2_SFTP_ATTRIBUTES attrs = { - LIBSSH2_SFTP_ATTR_PERMISSIONS, 
0, 0, 0, 0, 0, 0 + 0, 0, 0, 0, 0, 0, 0 }; size_t data_len; int retcode; - /* 13 = packet_len(4) + packet_type(1) + request_id(4) + path_len(4) */ - ssize_t packet_len = path_len + 13 + - sftp_attrsize(LIBSSH2_SFTP_ATTR_PERMISSIONS); + ssize_t packet_len; unsigned char *packet, *s, *data; int rc; - if (sftp->mkdir_state == libssh2_NB_state_idle) { + if(mode != LIBSSH2_SFTP_DEFAULT_MODE) { + /* Filetype in SFTP 3 and earlier */ + attrs.flags = LIBSSH2_SFTP_ATTR_PERMISSIONS; + attrs.permissions = mode | LIBSSH2_SFTP_ATTR_PFILETYPE_DIR; + } + + /* 13 = packet_len(4) + packet_type(1) + request_id(4) + path_len(4) */ + packet_len = path_len + 13 + sftp_attrsize(attrs.flags); + + if(sftp->mkdir_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Creating directory %s with mode 0%lo", path, mode); s = packet = LIBSSH2_ALLOC(session, packet_len); - if (!packet) { + if(!packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_MKDIR " "packet"); } - /* Filetype in SFTP 3 and earlier */ - attrs.permissions = mode | LIBSSH2_SFTP_ATTR_PFILETYPE_DIR; _libssh2_store_u32(&s, packet_len - 4); *(s++) = SSH_FXP_MKDIR; @@ -3022,13 +3264,13 @@ static int sftp_mkdir(LIBSSH2_SFTP *sftp, const char *path, packet = sftp->mkdir_packet; } - if (sftp->mkdir_state == libssh2_NB_state_created) { + if(sftp->mkdir_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { sftp->mkdir_packet = packet; return rc; } - if (packet_len != rc) { + if(packet_len != rc) { LIBSSH2_FREE(session, packet); sftp->mkdir_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, @@ -3040,10 +3282,18 @@ static int sftp_mkdir(LIBSSH2_SFTP *sftp, const char *path, } rc = sftp_packet_require(sftp, SSH_FXP_STATUS, sftp->mkdir_request_id, - &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &data, &data_len, 
9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP mkdir packet too short"); + } + else if(rc) { sftp->mkdir_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP STATUS"); @@ -3054,10 +3304,11 @@ static int sftp_mkdir(LIBSSH2_SFTP *sftp, const char *path, retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "OK!"); return 0; - } else { + } + else { sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol Error"); @@ -3096,11 +3347,11 @@ static int sftp_rmdir(LIBSSH2_SFTP *sftp, const char *path, unsigned char *s, *data; int rc; - if (sftp->rmdir_state == libssh2_NB_state_idle) { + if(sftp->rmdir_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "Removing directory: %s", path); s = sftp->rmdir_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->rmdir_packet) { + if(!sftp->rmdir_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_RMDIR " "packet"); @@ -3115,12 +3366,13 @@ static int sftp_rmdir(LIBSSH2_SFTP *sftp, const char *path, sftp->rmdir_state = libssh2_NB_state_created; } - if (sftp->rmdir_state == libssh2_NB_state_created) { + if(sftp->rmdir_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->rmdir_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (packet_len != rc) { + } + else if(packet_len != rc) { LIBSSH2_FREE(session, sftp->rmdir_packet); sftp->rmdir_packet = NULL; sftp->rmdir_state = libssh2_NB_state_idle; @@ -3134,10 +3386,18 @@ static int sftp_rmdir(LIBSSH2_SFTP *sftp, const char 
*path, } rc = sftp_packet_require(sftp, SSH_FXP_STATUS, - sftp->rmdir_request_id, &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + sftp->rmdir_request_id, &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (rc) { + } + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP rmdir packet too short"); + } + else if(rc) { sftp->rmdir_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Error waiting for FXP STATUS"); @@ -3148,9 +3408,10 @@ static int sftp_rmdir(LIBSSH2_SFTP *sftp, const char *path, retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { return 0; - } else { + } + else { sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol Error"); @@ -3192,13 +3453,13 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, { SSH_FXP_ATTRS, SSH_FXP_STATUS }; int rc; - if (sftp->stat_state == libssh2_NB_state_idle) { + if(sftp->stat_state == libssh2_NB_state_idle) { _libssh2_debug(session, LIBSSH2_TRACE_SFTP, "%s %s", (stat_type == LIBSSH2_SFTP_SETSTAT) ? "Set-statting" : (stat_type == LIBSSH2_SFTP_LSTAT ? 
"LStatting" : "Statting"), path); s = sftp->stat_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->stat_packet) { + if(!sftp->stat_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for FXP_*STAT " "packet"); @@ -3206,7 +3467,7 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, _libssh2_store_u32(&s, packet_len - 4); - switch (stat_type) { + switch(stat_type) { case LIBSSH2_SFTP_SETSTAT: *(s++) = SSH_FXP_SETSTAT; break; @@ -3223,17 +3484,18 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, _libssh2_store_u32(&s, sftp->stat_request_id); _libssh2_store_str(&s, path, path_len); - if (stat_type == LIBSSH2_SFTP_SETSTAT) + if(stat_type == LIBSSH2_SFTP_SETSTAT) s += sftp_attr2bin(s, attrs); sftp->stat_state = libssh2_NB_state_created; } - if (sftp->stat_state == libssh2_NB_state_created) { + if(sftp->stat_state == libssh2_NB_state_created) { rc = _libssh2_channel_write(channel, 0, sftp->stat_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return rc; - } else if (packet_len != rc) { + } + else if(packet_len != rc) { LIBSSH2_FREE(session, sftp->stat_packet); sftp->stat_packet = NULL; sftp->stat_state = libssh2_NB_state_idle; @@ -3247,10 +3509,17 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, } rc = sftp_packet_requirev(sftp, 2, stat_responses, - sftp->stat_request_id, &data, &data_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + sftp->stat_request_id, &data, &data_len, 9); + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - else if (rc) { + else if(rc == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP stat packet too short"); + } + else if(rc) { sftp->stat_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Timeout waiting for status message"); @@ -3258,14 +3527,16 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, 
sftp->stat_state = libssh2_NB_state_idle; - if (data[0] == SSH_FXP_STATUS) { + if(data[0] == SSH_FXP_STATUS) { int retcode; retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) { + if(retcode == LIBSSH2_FX_OK) { + memset(attrs, 0, sizeof(LIBSSH2_SFTP_ATTRIBUTES)); return 0; - } else { + } + else { sftp->last_errno = retcode; return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "SFTP Protocol Error"); @@ -3273,7 +3544,12 @@ static int sftp_stat(LIBSSH2_SFTP *sftp, const char *path, } memset(attrs, 0, sizeof(LIBSSH2_SFTP_ATTRIBUTES)); - sftp_bin2attr(attrs, data + 5); + if(sftp_bin2attr(attrs, data + 5, data_len - 5) < 0) { + LIBSSH2_FREE(session, data); + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "Attributes too short in SFTP fstat"); + } + LIBSSH2_FREE(session, data); return 0; @@ -3314,14 +3590,14 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, { SSH_FXP_NAME, SSH_FXP_STATUS }; int retcode; - if ((sftp->version < 3) && (link_type != LIBSSH2_SFTP_REALPATH)) { + if((sftp->version < 3) && (link_type != LIBSSH2_SFTP_REALPATH)) { return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "Server does not support SYMLINK or READLINK"); } - if (sftp->symlink_state == libssh2_NB_state_idle) { + if(sftp->symlink_state == libssh2_NB_state_idle) { s = sftp->symlink_packet = LIBSSH2_ALLOC(session, packet_len); - if (!sftp->symlink_packet) { + if(!sftp->symlink_packet) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "SYMLINK/READLINK/REALPATH packet"); @@ -3335,7 +3611,7 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, _libssh2_store_u32(&s, packet_len - 4); - switch (link_type) { + switch(link_type) { case LIBSSH2_SFTP_REALPATH: *(s++) = SSH_FXP_REALPATH; break; @@ -3352,18 +3628,18 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, _libssh2_store_u32(&s, sftp->symlink_request_id); _libssh2_store_str(&s, path, 
path_len); - if (link_type == LIBSSH2_SFTP_SYMLINK) + if(link_type == LIBSSH2_SFTP_SYMLINK) _libssh2_store_str(&s, target, target_len); sftp->symlink_state = libssh2_NB_state_created; } - if (sftp->symlink_state == libssh2_NB_state_created) { + if(sftp->symlink_state == libssh2_NB_state_created) { ssize_t rc = _libssh2_channel_write(channel, 0, sftp->symlink_packet, packet_len); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - else if (packet_len != rc) { + else if(packet_len != rc) { LIBSSH2_FREE(session, sftp->symlink_packet); sftp->symlink_packet = NULL; sftp->symlink_state = libssh2_NB_state_idle; @@ -3378,10 +3654,17 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, retcode = sftp_packet_requirev(sftp, 2, link_responses, sftp->symlink_request_id, &data, - &data_len); - if (retcode == LIBSSH2_ERROR_EAGAIN) + &data_len, 9); + if(retcode == LIBSSH2_ERROR_EAGAIN) return retcode; - else if (retcode) { + else if(retcode == LIBSSH2_ERROR_BUFFER_TOO_SMALL) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP symlink packet too short"); + } + else if(retcode) { sftp->symlink_state = libssh2_NB_state_idle; return _libssh2_error(session, retcode, "Error waiting for status message"); @@ -3389,12 +3672,10 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, sftp->symlink_state = libssh2_NB_state_idle; - if (data[0] == SSH_FXP_STATUS) { - int retcode; - + if(data[0] == SSH_FXP_STATUS) { retcode = _libssh2_ntohu32(data + 5); LIBSSH2_FREE(session, data); - if (retcode == LIBSSH2_FX_OK) + if(retcode == LIBSSH2_FX_OK) return LIBSSH2_ERROR_NONE; else { sftp->last_errno = retcode; @@ -3403,16 +3684,24 @@ static int sftp_symlink(LIBSSH2_SFTP *sftp, const char *path, } } - if (_libssh2_ntohu32(data + 5) < 1) { + if(_libssh2_ntohu32(data + 5) < 1) { LIBSSH2_FREE(session, data); return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, "Invalid 
READLINK/REALPATH response, " "no name entries"); } + if(data_len < 13) { + if(data_len > 0) { + LIBSSH2_FREE(session, data); + } + return _libssh2_error(session, LIBSSH2_ERROR_SFTP_PROTOCOL, + "SFTP stat packet too short"); + } + /* this reads a u32 and stores it into a signed 32bit value */ link_len = _libssh2_ntohu32(data + 9); - if (link_len < target_len) { + if(link_len < target_len) { memcpy(target, data + 13, link_len); target[link_len] = 0; retcode = (int)link_len; @@ -3459,7 +3748,7 @@ libssh2_sftp_last_error(LIBSSH2_SFTP *sftp) LIBSSH2_API LIBSSH2_CHANNEL * libssh2_sftp_get_channel(LIBSSH2_SFTP *sftp) { - if (!sftp) + if(!sftp) return NULL; return sftp->channel; diff --git a/vendor/libssh2/src/sftp.h b/vendor/libssh2/src/sftp.h index 2ed32cea6d..129b8f085f 100644 --- a/vendor/libssh2/src/sftp.h +++ b/vendor/libssh2/src/sftp.h @@ -1,5 +1,5 @@ -#ifndef _LIBSSH2_SFTP_H -#define _LIBSSH2_SFTP_H +#ifndef __LIBSSH2_SFTP_H +#define __LIBSSH2_SFTP_H /* * Copyright (C) 2010 - 2012 by Daniel Stenberg * Author: Daniel Stenberg @@ -122,6 +122,7 @@ struct _LIBSSH2_SFTP_HANDLE uint32_t names_left; void *names_packet; char *next_name; + size_t names_packet_len; } dir; } u; @@ -234,4 +235,4 @@ struct _LIBSSH2_SFTP uint32_t symlink_request_id; }; -#endif +#endif /* __LIBSSH2_SFTP_H */ diff --git a/vendor/libssh2/src/transport.c b/vendor/libssh2/src/transport.c index 8725da0950..17af3e4da1 100644 --- a/vendor/libssh2/src/transport.c +++ b/vendor/libssh2/src/transport.c @@ -65,16 +65,16 @@ debugdump(LIBSSH2_SESSION * session, unsigned int width = 0x10; char buffer[256]; /* Must be enough for width*4 + about 30 or so */ size_t used; - static const char* hex_chars = "0123456789ABCDEF"; + static const char *hex_chars = "0123456789ABCDEF"; - if (!(session->showmask & LIBSSH2_TRACE_TRANS)) { + if(!(session->showmask & LIBSSH2_TRACE_TRANS)) { /* not asked for, bail out */ return; } used = snprintf(buffer, sizeof(buffer), "=> %s (%d bytes)\n", desc, (int) size); - if 
(session->tracehandler) + if(session->tracehandler) (session->tracehandler)(session, session->tracehandler_context, buffer, used); else @@ -86,9 +86,9 @@ debugdump(LIBSSH2_SESSION * session, /* hex not disabled, show it */ for(c = 0; c < width; c++) { - if (i + c < size) { - buffer[used++] = hex_chars[(ptr[i+c] >> 4) & 0xF]; - buffer[used++] = hex_chars[ptr[i+c] & 0xF]; + if(i + c < size) { + buffer[used++] = hex_chars[(ptr[i + c] >> 4) & 0xF]; + buffer[used++] = hex_chars[ptr[i + c] & 0xF]; } else { buffer[used++] = ' '; @@ -96,7 +96,7 @@ debugdump(LIBSSH2_SESSION * session, } buffer[used++] = ' '; - if ((width/2) - 1 == c) + if((width/2) - 1 == c) buffer[used++] = ' '; } @@ -110,7 +110,7 @@ debugdump(LIBSSH2_SESSION * session, buffer[used++] = '\n'; buffer[used] = 0; - if (session->tracehandler) + if(session->tracehandler) (session->tracehandler)(session, session->tracehandler_context, buffer, used); else @@ -138,8 +138,8 @@ decrypt(LIBSSH2_SESSION * session, unsigned char *source, we risk losing those extra bytes */ assert((len % blocksize) == 0); - while (len >= blocksize) { - if (session->remote.crypt->crypt(session, source, blocksize, + while(len >= blocksize) { + if(session->remote.crypt->crypt(session, source, blocksize, &session->remote.crypt_abstract)) { LIBSSH2_FREE(session, p->payload); return LIBSSH2_ERROR_DECRYPT; @@ -169,11 +169,11 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ ) int rc; int compressed; - if (session->fullpacket_state == libssh2_NB_state_idle) { + if(session->fullpacket_state == libssh2_NB_state_idle) { session->fullpacket_macstate = LIBSSH2_MAC_CONFIRMED; session->fullpacket_payload_len = p->packet_length - 1; - if (encrypted) { + if(encrypted) { /* Calculate MAC hash */ session->remote.mac->hash(session, macbuf, /* store hash here */ @@ -188,7 +188,7 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ ) * buffer. 
Note that 'payload_len' here is the packet_length * field which includes the padding but not the MAC. */ - if (memcmp(macbuf, p->payload + session->fullpacket_payload_len, + if(memcmp(macbuf, p->payload + session->fullpacket_payload_len, session->remote.mac->mac_len)) { session->fullpacket_macstate = LIBSSH2_MAC_INVALID; } @@ -206,7 +206,7 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ ) ((session->state & LIBSSH2_STATE_AUTHENTICATED) || session->local.comp->use_in_auth); - if (compressed && session->remote.comp_abstract) { + if(compressed && session->remote.comp_abstract) { /* * The buffer for the decompression (remote.comp_abstract) is * initialised in time when it is needed so as long it is NULL we @@ -237,13 +237,13 @@ fullpacket(LIBSSH2_SESSION * session, int encrypted /* 1 or 0 */ ) session->fullpacket_state = libssh2_NB_state_created; } - if (session->fullpacket_state == libssh2_NB_state_created) { + if(session->fullpacket_state == libssh2_NB_state_created) { rc = _libssh2_packet_add(session, p->payload, session->fullpacket_payload_len, session->fullpacket_macstate); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return rc; - if (rc) { + if(rc) { session->fullpacket_state = libssh2_NB_state_idle; return rc; } @@ -281,7 +281,6 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) unsigned char block[MAX_BLOCKSIZE]; int blocksize; int encrypted = 1; - size_t total_num; /* default clear the bit */ session->socket_block_directions &= ~LIBSSH2_SESSION_BLOCK_INBOUND; @@ -298,7 +297,7 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) * of packet_read, then don't redirect, as that would be an infinite loop! 
*/ - if (session->state & LIBSSH2_STATE_EXCHANGING_KEYS && + if(session->state & LIBSSH2_STATE_EXCHANGING_KEYS && !(session->state & LIBSSH2_STATE_KEX_ACTIVE)) { /* Whoever wants a packet won't get anything until the key re-exchange @@ -307,7 +306,7 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Redirecting into the" " key re-exchange from _libssh2_transport_read"); rc = _libssh2_kex_exchange(session, 1, &session->startup_key_state); - if (rc) + if(rc) return rc; } @@ -316,20 +315,21 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) * I know this is very ugly and not a really good use of "goto", but * this case statement would be even uglier to do it any other way */ - if (session->readPack_state == libssh2_NB_state_jump1) { + if(session->readPack_state == libssh2_NB_state_jump1) { session->readPack_state = libssh2_NB_state_idle; encrypted = session->readPack_encrypted; goto libssh2_transport_read_point1; } do { - if (session->socket_state == LIBSSH2_SOCKET_DISCONNECTED) { - return LIBSSH2_ERROR_NONE; + if(session->socket_state == LIBSSH2_SOCKET_DISCONNECTED) { + return LIBSSH2_ERROR_SOCKET_DISCONNECT; } - if (session->state & LIBSSH2_STATE_NEWKEYS) { + if(session->state & LIBSSH2_STATE_NEWKEYS) { blocksize = session->remote.crypt->blocksize; - } else { + } + else { encrypted = 0; /* not encrypted */ blocksize = 5; /* not strictly true, but we can use 5 here to make the checks below work fine still */ @@ -348,18 +348,19 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) /* if remainbuf turns negative we have a bad internal error */ assert(remainbuf >= 0); - if (remainbuf < blocksize) { + if(remainbuf < blocksize) { /* If we have less than a blocksize left, it is too little data to deal with, read more */ ssize_t nread; /* move any remainder to the start of the buffer so that we can do a full refill */ - if (remainbuf) { + if(remainbuf) { memmove(p->buf, &p->buf[p->readidx], remainbuf); 
p->readidx = 0; p->writeidx = remainbuf; - } else { + } + else { /* nothing to move, just zero the indexes */ p->readidx = p->writeidx = 0; } @@ -369,10 +370,10 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) LIBSSH2_RECV(session, &p->buf[remainbuf], PACKETBUFSIZE - remainbuf, LIBSSH2_SOCKET_RECV_FLAGS(session)); - if (nread <= 0) { + if(nread <= 0) { /* check if this is due to EAGAIN and return the special return code if so, error out normally otherwise */ - if ((nread < 0) && (nread == -EAGAIN)) { + if((nread < 0) && (nread == -EAGAIN)) { session->socket_block_directions |= LIBSSH2_SESSION_BLOCK_INBOUND; return LIBSSH2_ERROR_EAGAIN; @@ -398,12 +399,14 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) /* how much data to deal with from the buffer */ numbytes = remainbuf; - if (!p->total_num) { + if(!p->total_num) { + size_t total_num; + /* No payload package area allocated yet. To know the size of this payload, we need to decrypt the first blocksize data. */ - if (numbytes < blocksize) { + if(numbytes < blocksize) { /* we can't act on anything less than blocksize, but this check is only done for the initial block since once we have got the start of a block we can in fact deal with fractions @@ -413,15 +416,16 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) return LIBSSH2_ERROR_EAGAIN; } - if (encrypted) { + if(encrypted) { rc = decrypt(session, &p->buf[p->readidx], block, blocksize); - if (rc != LIBSSH2_ERROR_NONE) { + if(rc != LIBSSH2_ERROR_NONE) { return rc; } /* save the first 5 bytes of the decrypted package, to be used in the hash calculation later down. 
*/ - memcpy(p->init, &p->buf[p->readidx], 5); - } else { + memcpy(p->init, block, 5); + } + else { /* the data is plain, just copy it verbatim to the working block buffer */ memcpy(block, &p->buf[p->readidx], blocksize); @@ -434,10 +438,18 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) * and we can extract packet and padding length from it */ p->packet_length = _libssh2_ntohu32(block); - if (p->packet_length < 1) + if(p->packet_length < 1) { return LIBSSH2_ERROR_DECRYPT; + } + else if(p->packet_length > LIBSSH2_PACKET_MAXPAYLOAD) { + return LIBSSH2_ERROR_OUT_OF_BOUNDARY; + } p->padding_length = block[4]; + if(p->padding_length > p->packet_length - 1) { + return LIBSSH2_ERROR_DECRYPT; + } + /* total_num is the number of bytes following the initial (5 bytes) packet length and padding length fields */ @@ -453,26 +465,33 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) * or less (including length, padding length, payload, * padding, and MAC.)." */ - if (total_num > LIBSSH2_PACKET_MAXPAYLOAD) { + if(total_num > LIBSSH2_PACKET_MAXPAYLOAD || total_num == 0) { return LIBSSH2_ERROR_OUT_OF_BOUNDARY; } /* Get a packet handle put data into. We get one to hold all data, including padding and MAC. 
*/ p->payload = LIBSSH2_ALLOC(session, total_num); - if (!p->payload) { + if(!p->payload) { return LIBSSH2_ERROR_ALLOC; } p->total_num = total_num; /* init write pointer to start of payload buffer */ p->wptr = p->payload; - if (blocksize > 5) { + if(blocksize > 5) { /* copy the data from index 5 to the end of the blocksize from the temporary buffer to the start of the decrypted buffer */ - memcpy(p->wptr, &block[5], blocksize - 5); - p->wptr += blocksize - 5; /* advance write pointer */ + if(blocksize - 5 <= (int) total_num) { + memcpy(p->wptr, &block[5], blocksize - 5); + p->wptr += blocksize - 5; /* advance write pointer */ + } + else { + if(p->payload) + LIBSSH2_FREE(session, p->payload); + return LIBSSH2_ERROR_OUT_OF_BOUNDARY; + } } /* init the data_num field to the number of bytes of @@ -487,13 +506,13 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) package */ remainpack = p->total_num - p->data_num; - if (numbytes > remainpack) { + if(numbytes > remainpack) { /* if we have more data in the buffer than what is going into this particular packet, we limit this round to this packet only */ numbytes = remainpack; } - if (encrypted) { + if(encrypted) { /* At the end of the incoming stream, there is a MAC, and we don't want to decrypt that since we need it "raw". 
We MUST however decrypt the padding data @@ -503,13 +522,14 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) /* if what we have plus numbytes is bigger than the total minus the skip margin, we should lower the amount to decrypt even more */ - if ((p->data_num + numbytes) > (p->total_num - skip)) { + if((p->data_num + numbytes) > (p->total_num - skip)) { numdecrypt = (p->total_num - skip) - p->data_num; - } else { + } + else { int frac; numdecrypt = numbytes; frac = numdecrypt % blocksize; - if (frac) { + if(frac) { /* not an aligned amount of blocks, align it */ numdecrypt -= frac; @@ -518,16 +538,17 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) numbytes = 0; } } - } else { + } + else { /* unencrypted data should not be decrypted at all */ numdecrypt = 0; } /* if there are bytes to decrypt, do that */ - if (numdecrypt > 0) { + if(numdecrypt > 0) { /* now decrypt the lot */ rc = decrypt(session, &p->buf[p->readidx], p->wptr, numdecrypt); - if (rc != LIBSSH2_ERROR_NONE) { + if(rc != LIBSSH2_ERROR_NONE) { p->total_num = 0; /* no packet buffer available */ return rc; } @@ -545,8 +566,16 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) /* if there are bytes to copy that aren't decrypted, simply copy them as-is to the target buffer */ - if (numbytes > 0) { - memcpy(p->wptr, &p->buf[p->readidx], numbytes); + if(numbytes > 0) { + + if(numbytes <= (int)(p->total_num - (p->wptr - p->payload))) { + memcpy(p->wptr, &p->buf[p->readidx], numbytes); + } + else { + if(p->payload) + LIBSSH2_FREE(session, p->payload); + return LIBSSH2_ERROR_OUT_OF_BOUNDARY; + } /* advance the read pointer */ p->readidx += numbytes; @@ -560,21 +589,21 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) current packet */ remainpack = p->total_num - p->data_num; - if (!remainpack) { + if(!remainpack) { /* we have a full packet */ libssh2_transport_read_point1: rc = fullpacket(session, encrypted); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { 
- if (session->packAdd_state != libssh2_NB_state_idle) - { + if(session->packAdd_state != libssh2_NB_state_idle) { /* fullpacket only returns LIBSSH2_ERROR_EAGAIN if - * libssh2_packet_add returns LIBSSH2_ERROR_EAGAIN. If that - * returns LIBSSH2_ERROR_EAGAIN but the packAdd_state is idle, - * then the packet has been added to the brigade, but some - * immediate action that was taken based on the packet - * type (such as key re-exchange) is not yet complete. - * Clear the way for a new packet to be read in. + * libssh2_packet_add returns LIBSSH2_ERROR_EAGAIN. If + * that returns LIBSSH2_ERROR_EAGAIN but the packAdd_state + * is idle, then the packet has been added to the brigade, + * but some immediate action that was taken based on the + * packet type (such as key re-exchange) is not yet + * complete. Clear the way for a new packet to be read + * in. */ session->readPack_encrypted = encrypted; session->readPack_state = libssh2_NB_state_jump1; @@ -587,7 +616,7 @@ int _libssh2_transport_read(LIBSSH2_SESSION * session) return rc; } - } while (1); /* loop */ + } while(1); /* loop */ return LIBSSH2_ERROR_SOCKET_RECV; /* we never reach this point */ } @@ -600,13 +629,13 @@ send_existing(LIBSSH2_SESSION *session, const unsigned char *data, ssize_t length; struct transportpacket *p = &session->packet; - if (!p->olen) { + if(!p->olen) { *ret = 0; return LIBSSH2_ERROR_NONE; } /* send as much as possible of the existing packet */ - if ((data != p->odata) || (data_len != p->olen)) { + if((data != p->odata) || (data_len != p->olen)) { /* When we are about to complete the sending of a packet, it is vital that the caller doesn't try to send a new/different packet since we don't add this one up until the previous one has been sent. 
To @@ -622,7 +651,7 @@ send_existing(LIBSSH2_SESSION *session, const unsigned char *data, rc = LIBSSH2_SEND(session, &p->outbuf[p->osent], length, LIBSSH2_SOCKET_SEND_FLAGS(session)); - if (rc < 0) + if(rc < 0) _libssh2_debug(session, LIBSSH2_TRACE_SOCKET, "Error sending %d bytes: %d", length, -rc); else { @@ -633,7 +662,7 @@ send_existing(LIBSSH2_SESSION *session, const unsigned char *data, &p->outbuf[p->osent], rc); } - if (rc == length) { + if(rc == length) { /* the remainder of the package was sent */ p->ototal_num = 0; p->olen = 0; @@ -643,9 +672,9 @@ send_existing(LIBSSH2_SESSION *session, const unsigned char *data, return LIBSSH2_ERROR_NONE; } - else if (rc < 0) { + else if(rc < 0) { /* nothing was sent */ - if (rc != -EAGAIN) + if(rc != -EAGAIN) /* send failure! */ return LIBSSH2_ERROR_SOCKET_SEND; @@ -705,14 +734,14 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, * * See the similar block in _libssh2_transport_read for more details. */ - if (session->state & LIBSSH2_STATE_EXCHANGING_KEYS && + if(session->state & LIBSSH2_STATE_EXCHANGING_KEYS && !(session->state & LIBSSH2_STATE_KEX_ACTIVE)) { /* Don't write any new packets if we're still in the middle of a key * exchange. */ _libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Redirecting into the" " key re-exchange from _libssh2_transport_send"); rc = _libssh2_kex_exchange(session, 1, &session->startup_key_state); - if (rc) + if(rc) return rc; } @@ -723,12 +752,12 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, /* FIRST, check if we have a pending write to complete. send_existing only sanity-check data and data_len and not data2 and data2_len!! 
*/ rc = send_existing(session, data, data_len, &ret); - if (rc) + if(rc) return rc; session->socket_block_directions &= ~LIBSSH2_SESSION_BLOCK_OUTBOUND; - if (ret) + if(ret) /* set by send_existing if data was sent */ return rc; @@ -740,7 +769,7 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, ((session->state & LIBSSH2_STATE_AUTHENTICATED) || session->local.comp->use_in_auth); - if (encrypted && compressed) { + if(encrypted && compressed && session->local.comp_abstract) { /* the idea here is that these function must fail if the output gets larger than what fits in the assigned buffer so thus they don't check the input size as we don't know how much it compresses */ @@ -761,7 +790,8 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, dest2_len -= dest_len; rc = session->local.comp->comp(session, - &p->outbuf[5+dest_len], &dest2_len, + &p->outbuf[5 + dest_len], + &dest2_len, data2, data2_len, &session->local.comp_abstract); } @@ -781,7 +811,7 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, /* copy the payload data */ memcpy(&p->outbuf[5], data, data_len); if(data2 && data2_len) - memcpy(&p->outbuf[5+data_len], data2, data2_len); + memcpy(&p->outbuf[5 + data_len], data2, data2_len); data_len += data2_len; /* use the combined length */ } @@ -805,7 +835,7 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, /* if the padding becomes too small we add another blocksize worth of it (taken from the original libssh2 where it didn't have any real explanation) */ - if (padding_length < 4) { + if(padding_length < 4) { padding_length += blocksize; } #ifdef RANDOM_PADDING @@ -832,9 +862,12 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, p->outbuf[4] = (unsigned char)padding_length; /* fill the padding area with random junk */ - _libssh2_random(p->outbuf + 5 + data_len, padding_length); + if(_libssh2_random(p->outbuf + 5 + data_len, padding_length)) { + return _libssh2_error(session, LIBSSH2_ERROR_RANDGEN, + "Unable to get random bytes for 
packet padding"); + } - if (encrypted) { + if(encrypted) { size_t i; /* Calculate MAC hash. Put the output at index packet_length, @@ -850,7 +883,7 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, The MAC field is not encrypted. */ for(i = 0; i < packet_length; i += session->local.crypt->blocksize) { unsigned char *ptr = &p->outbuf[i]; - if (session->local.crypt->crypt(session, ptr, + if(session->local.crypt->crypt(session, ptr, session->local.crypt->blocksize, &session->local.crypt_abstract)) return LIBSSH2_ERROR_ENCRYPT; /* encryption failure */ @@ -861,7 +894,7 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, ret = LIBSSH2_SEND(session, p->outbuf, total_length, LIBSSH2_SOCKET_SEND_FLAGS(session)); - if (ret < 0) + if(ret < 0) _libssh2_debug(session, LIBSSH2_TRACE_SOCKET, "Error sending %d bytes: %d", total_length, -ret); else { @@ -870,8 +903,8 @@ int _libssh2_transport_send(LIBSSH2_SESSION *session, debugdump(session, "libssh2_transport_write send()", p->outbuf, ret); } - if (ret != total_length) { - if (ret >= 0 || ret == -EAGAIN) { + if(ret != total_length) { + if(ret >= 0 || ret == -EAGAIN) { /* the whole packet could not be sent, save the rest */ session->socket_block_directions |= LIBSSH2_SESSION_BLOCK_OUTBOUND; p->odata = orgdata; diff --git a/vendor/libssh2/src/transport.h b/vendor/libssh2/src/transport.h index 89982a67f0..7d395d0e78 100644 --- a/vendor/libssh2/src/transport.h +++ b/vendor/libssh2/src/transport.h @@ -1,6 +1,5 @@ #ifndef __LIBSSH2_TRANSPORT_H #define __LIBSSH2_TRANSPORT_H - /* Copyright (C) 2007 The Written Word, Inc. All rights reserved. 
* Copyright (C) 2009-2010 by Daniel Stenberg * Author: Daniel Stenberg diff --git a/vendor/libssh2/src/userauth.c b/vendor/libssh2/src/userauth.c index cdfa25e663..e5a270c6db 100644 --- a/vendor/libssh2/src/userauth.c +++ b/vendor/libssh2/src/userauth.c @@ -63,15 +63,17 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, unsigned int username_len) { - static const unsigned char reply_codes[3] = - { SSH_MSG_USERAUTH_SUCCESS, SSH_MSG_USERAUTH_FAILURE, 0 }; + unsigned char reply_codes[4] = + { SSH_MSG_USERAUTH_SUCCESS, SSH_MSG_USERAUTH_FAILURE, + SSH_MSG_USERAUTH_BANNER, 0 }; /* packet_type(1) + username_len(4) + service_len(4) + service(14)"ssh-connection" + method_len(4) = 27 */ unsigned long methods_len; + unsigned int banner_len; unsigned char *s; int rc; - if (session->userauth_list_state == libssh2_NB_state_idle) { + if(session->userauth_list_state == libssh2_NB_state_idle) { /* Zero the whole thing out */ memset(&session->userauth_list_packet_requirev_state, 0, sizeof(session->userauth_list_packet_requirev_state)); @@ -80,7 +82,7 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, s = session->userauth_list_data = LIBSSH2_ALLOC(session, session->userauth_list_data_len); - if (!session->userauth_list_data) { + if(!session->userauth_list_data) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for userauth_list"); return NULL; @@ -94,11 +96,11 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, session->userauth_list_state = libssh2_NB_state_created; } - if (session->userauth_list_state == libssh2_NB_state_created) { + if(session->userauth_list_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->userauth_list_data, session->userauth_list_data_len, (unsigned char *)"none", 4); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting userauth list"); 
return NULL; @@ -107,7 +109,7 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, LIBSSH2_FREE(session, session->userauth_list_data); session->userauth_list_data = NULL; - if (rc) { + if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send userauth-none request"); session->userauth_list_state = libssh2_NB_state_idle; @@ -117,23 +119,75 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, session->userauth_list_state = libssh2_NB_state_sent; } - if (session->userauth_list_state == libssh2_NB_state_sent) { + if(session->userauth_list_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &session->userauth_list_data, &session->userauth_list_data_len, 0, NULL, 0, - &session->userauth_list_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &session->userauth_list_packet_requirev_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting userauth list"); return NULL; - } else if (rc) { + } + else if(rc || (session->userauth_list_data_len < 1)) { _libssh2_error(session, rc, "Failed getting response"); session->userauth_list_state = libssh2_NB_state_idle; return NULL; } - if (session->userauth_list_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_list_data[0] == SSH_MSG_USERAUTH_BANNER) { + if(session->userauth_list_data_len < 5) { + LIBSSH2_FREE(session, session->userauth_list_data); + session->userauth_list_data = NULL; + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + return NULL; + } + banner_len = _libssh2_ntohu32(session->userauth_list_data + 1); + if(banner_len >= session->userauth_list_data_len - 5) { + LIBSSH2_FREE(session, session->userauth_list_data); + session->userauth_list_data = NULL; + _libssh2_error(session, LIBSSH2_ERROR_OUT_OF_BOUNDARY, + "Unexpected userauth banner size"); + return NULL; + } + session->userauth_banner = LIBSSH2_ALLOC(session, 
banner_len); + if(!session->userauth_banner) { + LIBSSH2_FREE(session, session->userauth_list_data); + session->userauth_list_data = NULL; + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for userauth_banner"); + return NULL; + } + memmove(session->userauth_banner, session->userauth_list_data + 5, + banner_len); + session->userauth_banner[banner_len] = '\0'; + _libssh2_debug(session, LIBSSH2_TRACE_AUTH, + "Banner: %s", + session->userauth_banner); + LIBSSH2_FREE(session, session->userauth_list_data); + session->userauth_list_data = NULL; + /* SSH_MSG_USERAUTH_BANNER has been handled */ + reply_codes[2] = 0; + rc = _libssh2_packet_requirev(session, reply_codes, + &session->userauth_list_data, + &session->userauth_list_data_len, 0, + NULL, 0, + &session->userauth_list_packet_requirev_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { + _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block requesting userauth list"); + return NULL; + } + else if(rc || (session->userauth_list_data_len < 1)) { + _libssh2_error(session, rc, "Failed getting response"); + session->userauth_list_state = libssh2_NB_state_idle; + return NULL; + } + } + + if(session->userauth_list_data[0] == SSH_MSG_USERAUTH_SUCCESS) { /* Wow, who'dve thought... */ _libssh2_error(session, LIBSSH2_ERROR_NONE, "No error"); LIBSSH2_FREE(session, session->userauth_list_data); @@ -143,7 +197,20 @@ static char *userauth_list(LIBSSH2_SESSION *session, const char *username, return NULL; } + if(session->userauth_list_data_len < 5) { + LIBSSH2_FREE(session, session->userauth_list_data); + session->userauth_list_data = NULL; + _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + return NULL; + } + methods_len = _libssh2_ntohu32(session->userauth_list_data + 1); + if(methods_len >= session->userauth_list_data_len - 5) { + _libssh2_error(session, LIBSSH2_ERROR_OUT_OF_BOUNDARY, + "Unexpected userauth list size"); + return NULL; + } /* Do note that the memory areas overlap! 
*/ memmove(session->userauth_list_data, session->userauth_list_data + 5, @@ -175,6 +242,30 @@ libssh2_userauth_list(LIBSSH2_SESSION * session, const char *user, return ptr; } +/* libssh2_userauth_banner + * + * Retrieve banner message from server, if available. + * When no such message is sent by server or if no authentication attempt has + * been made, this function returns LIBSSH2_ERROR_MISSING_AUTH_BANNER. + */ +LIBSSH2_API int +libssh2_userauth_banner(LIBSSH2_SESSION *session, char **banner) +{ + if(NULL == session) + return LIBSSH2_ERROR_MISSING_USERAUTH_BANNER; + + if(!session->userauth_banner) { + return _libssh2_error(session, + LIBSSH2_ERROR_MISSING_USERAUTH_BANNER, + "Missing userauth banner"); + } + + if(banner != NULL) + *banner = session->userauth_banner; + + return LIBSSH2_ERROR_NONE; +} + /* * libssh2_userauth_authenticated * @@ -205,7 +296,7 @@ userauth_password(LIBSSH2_SESSION *session, }; int rc; - if (session->userauth_pswd_state == libssh2_NB_state_idle) { + if(session->userauth_pswd_state == libssh2_NB_state_idle) { /* Zero the whole thing out */ memset(&session->userauth_pswd_packet_requirev_state, 0, sizeof(session->userauth_pswd_packet_requirev_state)); @@ -223,7 +314,7 @@ userauth_password(LIBSSH2_SESSION *session, struct */ s = session->userauth_pswd_data = LIBSSH2_ALLOC(session, session->userauth_pswd_data_len); - if (!session->userauth_pswd_data) { + if(!session->userauth_pswd_data) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "userauth-password request"); @@ -243,11 +334,11 @@ userauth_password(LIBSSH2_SESSION *session, session->userauth_pswd_state = libssh2_NB_state_created; } - if (session->userauth_pswd_state == libssh2_NB_state_created) { + if(session->userauth_pswd_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->userauth_pswd_data, session->userauth_pswd_data_len, password, password_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == 
LIBSSH2_ERROR_EAGAIN) { return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block writing password request"); } @@ -256,7 +347,7 @@ userauth_password(LIBSSH2_SESSION *session, LIBSSH2_FREE(session, session->userauth_pswd_data); session->userauth_pswd_data = NULL; - if (rc) { + if(rc) { session->userauth_pswd_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send userauth-password request"); @@ -267,10 +358,10 @@ userauth_password(LIBSSH2_SESSION *session, password_response: - if ((session->userauth_pswd_state == libssh2_NB_state_sent) + if((session->userauth_pswd_state == libssh2_NB_state_sent) || (session->userauth_pswd_state == libssh2_NB_state_sent1) || (session->userauth_pswd_state == libssh2_NB_state_sent2)) { - if (session->userauth_pswd_state == libssh2_NB_state_sent) { + if(session->userauth_pswd_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &session->userauth_pswd_data, &session->userauth_pswd_data_len, @@ -278,15 +369,20 @@ userauth_password(LIBSSH2_SESSION *session, &session-> userauth_pswd_packet_requirev_state); - if (rc) { - if (rc != LIBSSH2_ERROR_EAGAIN) + if(rc) { + if(rc != LIBSSH2_ERROR_EAGAIN) session->userauth_pswd_state = libssh2_NB_state_idle; return _libssh2_error(session, rc, "Waiting for password response"); } + else if(session->userauth_pswd_data_len < 1) { + session->userauth_pswd_state = libssh2_NB_state_idle; + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + } - if (session->userauth_pswd_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_pswd_data[0] == SSH_MSG_USERAUTH_SUCCESS) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Password authentication successful"); LIBSSH2_FREE(session, session->userauth_pswd_data); @@ -294,7 +390,9 @@ userauth_password(LIBSSH2_SESSION *session, session->state |= LIBSSH2_STATE_AUTHENTICATED; session->userauth_pswd_state = libssh2_NB_state_idle; return 0; - } else 
if (session->userauth_pswd_data[0] == SSH_MSG_USERAUTH_FAILURE) { + } + else if(session->userauth_pswd_data[0] == + SSH_MSG_USERAUTH_FAILURE) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Password authentication failed"); LIBSSH2_FREE(session, session->userauth_pswd_data); @@ -312,41 +410,54 @@ userauth_password(LIBSSH2_SESSION *session, session->userauth_pswd_state = libssh2_NB_state_sent1; } - if ((session->userauth_pswd_data[0] == + if(session->userauth_pswd_data_len < 1) { + session->userauth_pswd_state = libssh2_NB_state_idle; + return _libssh2_error(session, LIBSSH2_ERROR_PROTO, + "Unexpected packet size"); + } + + if((session->userauth_pswd_data[0] == SSH_MSG_USERAUTH_PASSWD_CHANGEREQ) || (session->userauth_pswd_data0 == SSH_MSG_USERAUTH_PASSWD_CHANGEREQ)) { session->userauth_pswd_data0 = SSH_MSG_USERAUTH_PASSWD_CHANGEREQ; - if ((session->userauth_pswd_state == libssh2_NB_state_sent1) || + if((session->userauth_pswd_state == libssh2_NB_state_sent1) || (session->userauth_pswd_state == libssh2_NB_state_sent2)) { - if (session->userauth_pswd_state == libssh2_NB_state_sent1) { + if(session->userauth_pswd_state == libssh2_NB_state_sent1) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Password change required"); LIBSSH2_FREE(session, session->userauth_pswd_data); session->userauth_pswd_data = NULL; } - if (passwd_change_cb) { - if (session->userauth_pswd_state == libssh2_NB_state_sent1) { + if(passwd_change_cb) { + if(session->userauth_pswd_state == + libssh2_NB_state_sent1) { passwd_change_cb(session, &session->userauth_pswd_newpw, &session->userauth_pswd_newpw_len, &session->abstract); - if (!session->userauth_pswd_newpw) { + if(!session->userauth_pswd_newpw) { return _libssh2_error(session, - LIBSSH2_ERROR_PASSWORD_EXPIRED, + LIBSSH2_ERROR_PASSWORD_EXPIRED, "Password expired, and " "callback failed"); } /* basic data_len + newpw_len(4) */ - session->userauth_pswd_data_len = - username_len + password_len + 44; + if(username_len + password_len + 44 <= UINT_MAX) 
{ + session->userauth_pswd_data_len = + username_len + password_len + 44; + s = session->userauth_pswd_data = + LIBSSH2_ALLOC(session, + session->userauth_pswd_data_len); + } + else { + s = session->userauth_pswd_data = NULL; + session->userauth_pswd_data_len = 0; + } - s = session->userauth_pswd_data = - LIBSSH2_ALLOC(session, - session->userauth_pswd_data_len); - if (!session->userauth_pswd_data) { + if(!session->userauth_pswd_data) { LIBSSH2_FREE(session, session->userauth_pswd_newpw); session->userauth_pswd_newpw = NULL; @@ -371,15 +482,17 @@ userauth_password(LIBSSH2_SESSION *session, session->userauth_pswd_state = libssh2_NB_state_sent2; } - if (session->userauth_pswd_state == libssh2_NB_state_sent2) { + if(session->userauth_pswd_state == + libssh2_NB_state_sent2) { rc = _libssh2_transport_send(session, - session->userauth_pswd_data, - session->userauth_pswd_data_len, - (unsigned char *) - session->userauth_pswd_newpw, - session->userauth_pswd_newpw_len); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + session->userauth_pswd_data, + session->userauth_pswd_data_len, + (unsigned char *) + session->userauth_pswd_newpw, + session->userauth_pswd_newpw_len); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, + LIBSSH2_ERROR_EAGAIN, "Would block waiting"); } @@ -389,7 +502,7 @@ userauth_password(LIBSSH2_SESSION *session, LIBSSH2_FREE(session, session->userauth_pswd_newpw); session->userauth_pswd_newpw = NULL; - if (rc) { + if(rc) { return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send userauth " @@ -404,7 +517,8 @@ userauth_password(LIBSSH2_SESSION *session, goto password_response; } } - } else { + } + else { session->userauth_pswd_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_PASSWORD_EXPIRED, "Password Expired, and no callback " @@ -432,7 +546,8 @@ LIBSSH2_API int libssh2_userauth_password_ex(LIBSSH2_SESSION *session, const char *username, unsigned int 
username_len, const char *password, unsigned int password_len, - LIBSSH2_PASSWD_CHANGEREQ_FUNC((*passwd_change_cb))) + LIBSSH2_PASSWD_CHANGEREQ_FUNC + ((*passwd_change_cb))) { int rc; BLOCK_ADJUST(rc, session, @@ -454,13 +569,13 @@ memory_read_publickey(LIBSSH2_SESSION * session, unsigned char **method, size_t pubkey_len = pubkeyfiledata_len; unsigned int tmp_len; - if (pubkeyfiledata_len <= 1) { + if(pubkeyfiledata_len <= 1) { return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Invalid data in public key file"); } pubkey = LIBSSH2_ALLOC(session, pubkeyfiledata_len); - if (!pubkey) { + if(!pubkey) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for public key data"); } @@ -470,16 +585,17 @@ memory_read_publickey(LIBSSH2_SESSION * session, unsigned char **method, /* * Remove trailing whitespace */ - while (pubkey_len && isspace(pubkey[pubkey_len - 1])) + while(pubkey_len && isspace(pubkey[pubkey_len - 1])) pubkey_len--; - if (!pubkey_len) { + if(!pubkey_len) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Missing public key data"); } - if ((sp1 = memchr(pubkey, ' ', pubkey_len)) == NULL) { + sp1 = memchr(pubkey, ' ', pubkey_len); + if(sp1 == NULL) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Invalid public key data"); @@ -487,12 +603,13 @@ memory_read_publickey(LIBSSH2_SESSION * session, unsigned char **method, sp1++; - if ((sp2 = memchr(sp1, ' ', pubkey_len - (sp1 - pubkey - 1))) == NULL) { + sp2 = memchr(sp1, ' ', pubkey_len - (sp1 - pubkey)); + if(sp2 == NULL) { /* Assume that the id string is missing, but that it's okay */ sp2 = pubkey + pubkey_len; } - if (libssh2_base64_decode(session, (char **) &tmp, &tmp_len, + if(libssh2_base64_decode(session, (char **) &tmp, &tmp_len, (char *) sp1, sp2 - sp1)) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -538,29 +655,29 @@ file_read_publickey(LIBSSH2_SESSION * session, 
unsigned char **method, _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Loading public key file: %s", pubkeyfile); /* Read Public Key */ - fd = fopen(pubkeyfile, "r"); - if (!fd) { + fd = fopen(pubkeyfile, FOPEN_READTEXT); + if(!fd) { return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Unable to open public key file"); } - while (!feof(fd) && 1 == fread(&c, 1, 1, fd) && c != '\r' && c != '\n') { + while(!feof(fd) && 1 == fread(&c, 1, 1, fd) && c != '\r' && c != '\n') { pubkey_len++; } rewind(fd); - if (pubkey_len <= 1) { + if(pubkey_len <= 1) { fclose(fd); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Invalid data in public key file"); } pubkey = LIBSSH2_ALLOC(session, pubkey_len); - if (!pubkey) { + if(!pubkey) { fclose(fd); return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for public key data"); } - if (fread(pubkey, 1, pubkey_len, fd) != pubkey_len) { + if(fread(pubkey, 1, pubkey_len, fd) != pubkey_len) { LIBSSH2_FREE(session, pubkey); fclose(fd); return _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -570,17 +687,18 @@ file_read_publickey(LIBSSH2_SESSION * session, unsigned char **method, /* * Remove trailing whitespace */ - while (pubkey_len && isspace(pubkey[pubkey_len - 1])) { + while(pubkey_len && isspace(pubkey[pubkey_len - 1])) { pubkey_len--; } - if (!pubkey_len) { + if(!pubkey_len) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Missing public key data"); } - if ((sp1 = memchr(pubkey, ' ', pubkey_len)) == NULL) { + sp1 = memchr(pubkey, ' ', pubkey_len); + if(sp1 == NULL) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, "Invalid public key data"); @@ -588,13 +706,14 @@ file_read_publickey(LIBSSH2_SESSION * session, unsigned char **method, sp1++; - sp_len = sp1 > pubkey ? (sp1 - pubkey) - 1 : 0; - if ((sp2 = memchr(sp1, ' ', pubkey_len - sp_len)) == NULL) { + sp_len = sp1 > pubkey ? 
(sp1 - pubkey) : 0; + sp2 = memchr(sp1, ' ', pubkey_len - sp_len); + if(sp2 == NULL) { /* Assume that the id string is missing, but that it's okay */ sp2 = pubkey + pubkey_len; } - if (libssh2_base64_decode(session, (char **) &tmp, &tmp_len, + if(libssh2_base64_decode(session, (char **) &tmp, &tmp_len, (char *) sp1, sp2 - sp1)) { LIBSSH2_FREE(session, pubkey); return _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -626,8 +745,8 @@ memory_read_privatekey(LIBSSH2_SESSION * session, *hostkey_method = NULL; *hostkey_abstract = NULL; - while (*hostkey_methods_avail && (*hostkey_methods_avail)->name) { - if ((*hostkey_methods_avail)->initPEMFromMemory + while(*hostkey_methods_avail && (*hostkey_methods_avail)->name) { + if((*hostkey_methods_avail)->initPEMFromMemory && strncmp((*hostkey_methods_avail)->name, (const char *) method, method_len) == 0) { *hostkey_method = *hostkey_methods_avail; @@ -635,12 +754,12 @@ memory_read_privatekey(LIBSSH2_SESSION * session, } hostkey_methods_avail++; } - if (!*hostkey_method) { + if(!*hostkey_method) { return _libssh2_error(session, LIBSSH2_ERROR_METHOD_NONE, "No handler for specified private key"); } - if ((*hostkey_method)-> + if((*hostkey_method)-> initPEMFromMemory(session, privkeyfiledata, privkeyfiledata_len, (unsigned char *) passphrase, hostkey_abstract)) { @@ -668,8 +787,8 @@ file_read_privatekey(LIBSSH2_SESSION * session, privkeyfile); *hostkey_method = NULL; *hostkey_abstract = NULL; - while (*hostkey_methods_avail && (*hostkey_methods_avail)->name) { - if ((*hostkey_methods_avail)->initPEM + while(*hostkey_methods_avail && (*hostkey_methods_avail)->name) { + if((*hostkey_methods_avail)->initPEM && strncmp((*hostkey_methods_avail)->name, (const char *) method, method_len) == 0) { *hostkey_method = *hostkey_methods_avail; @@ -677,12 +796,12 @@ file_read_privatekey(LIBSSH2_SESSION * session, } hostkey_methods_avail++; } - if (!*hostkey_method) { + if(!*hostkey_method) { return _libssh2_error(session, 
LIBSSH2_ERROR_METHOD_NONE, "No handler for specified private key"); } - if ((*hostkey_method)-> + if((*hostkey_method)-> initPEM(session, privkeyfile, (unsigned char *) passphrase, hostkey_abstract)) { return _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -720,15 +839,15 @@ sign_frommemory(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, datavec.iov_base = (void *)data; datavec.iov_len = data_len; - if (privkeyobj->signv(session, sig, sig_len, 1, &datavec, + if(privkeyobj->signv(session, sig, sig_len, 1, &datavec, &hostkey_abstract)) { - if (privkeyobj->dtor) { - privkeyobj->dtor(session, abstract); + if(privkeyobj->dtor) { + privkeyobj->dtor(session, &hostkey_abstract); } return -1; } - if (privkeyobj->dtor) { + if(privkeyobj->dtor) { privkeyobj->dtor(session, &hostkey_abstract); } return 0; @@ -756,15 +875,15 @@ sign_fromfile(LIBSSH2_SESSION *session, unsigned char **sig, size_t *sig_len, datavec.iov_base = (void *)data; datavec.iov_len = data_len; - if (privkeyobj->signv(session, sig, sig_len, 1, &datavec, + if(privkeyobj->signv(session, sig, sig_len, 1, &datavec, &hostkey_abstract)) { - if (privkeyobj->dtor) { + if(privkeyobj->dtor) { privkeyobj->dtor(session, &hostkey_abstract); } return -1; } - if (privkeyobj->dtor) { + if(privkeyobj->dtor) { privkeyobj->dtor(session, &hostkey_abstract); } return 0; @@ -786,9 +905,10 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, { int rc; - if (session->userauth_host_state == libssh2_NB_state_idle) { + if(session->userauth_host_state == libssh2_NB_state_idle) { const LIBSSH2_HOSTKEY_METHOD *privkeyobj; - unsigned char *pubkeydata, *sig = NULL; + unsigned char *pubkeydata = NULL; + unsigned char *sig = NULL; size_t pubkeydata_len = 0; size_t sig_len = 0; void *abstract; @@ -799,7 +919,7 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, memset(&session->userauth_host_packet_requirev_state, 0, sizeof(session->userauth_host_packet_requirev_state)); - if (publickey) { + if(publickey) { rc = 
file_read_publickey(session, &session->userauth_host_method, &session->userauth_host_method_len, &pubkeydata, &pubkeydata_len, publickey); @@ -814,7 +934,7 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, &session->userauth_host_method_len, &pubkeydata, &pubkeydata_len, privatekey, passphrase); - if (rc) + if(rc) /* libssh2_pub_priv_keyfile calls _libssh2_error() */ return rc; } @@ -839,7 +959,7 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, session->userauth_host_packet_len + 4 + (4 + session->userauth_host_method_len) + (4 + pubkeydata_len)); - if (!session->userauth_host_packet) { + if(!session->userauth_host_packet) { LIBSSH2_FREE(session, session->userauth_host_method); session->userauth_host_method = NULL; LIBSSH2_FREE(session, pubkeydata); @@ -883,31 +1003,31 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, datavec[2].iov_base = (void *)session->userauth_host_packet; datavec[2].iov_len = session->userauth_host_packet_len; - if (privkeyobj && privkeyobj->signv && + if(privkeyobj && privkeyobj->signv && privkeyobj->signv(session, &sig, &sig_len, 3, datavec, &abstract)) { LIBSSH2_FREE(session, session->userauth_host_method); session->userauth_host_method = NULL; LIBSSH2_FREE(session, session->userauth_host_packet); session->userauth_host_packet = NULL; - if (privkeyobj->dtor) { + if(privkeyobj->dtor) { privkeyobj->dtor(session, &abstract); } return -1; } - if (privkeyobj && privkeyobj->dtor) { + if(privkeyobj && privkeyobj->dtor) { privkeyobj->dtor(session, &abstract); } - if (sig_len > pubkeydata_len) { + if(sig_len > pubkeydata_len) { unsigned char *newpacket; /* Should *NEVER* happen, but...well.. 
better safe than sorry */ newpacket = LIBSSH2_REALLOC(session, session->userauth_host_packet, session->userauth_host_packet_len + 4 + (4 + session->userauth_host_method_len) + (4 + sig_len)); /* PK sigblob */ - if (!newpacket) { + if(!newpacket) { LIBSSH2_FREE(session, sig); LIBSSH2_FREE(session, session->userauth_host_packet); session->userauth_host_packet = NULL; @@ -924,7 +1044,8 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, session->userauth_host_packet + session->userauth_host_packet_len; _libssh2_store_u32(&session->userauth_host_s, - 4 + session->userauth_host_method_len + 4 + sig_len); + 4 + session->userauth_host_method_len + + 4 + sig_len); _libssh2_store_str(&session->userauth_host_s, (const char *)session->userauth_host_method, session->userauth_host_method_len); @@ -941,15 +1062,16 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, session->userauth_host_state = libssh2_NB_state_created; } - if (session->userauth_host_state == libssh2_NB_state_created) { + if(session->userauth_host_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->userauth_host_packet, session->userauth_host_s - session->userauth_host_packet, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); } - else if (rc) { + else if(rc) { LIBSSH2_FREE(session, session->userauth_host_packet); session->userauth_host_packet = NULL; session->userauth_host_state = libssh2_NB_state_idle; @@ -962,7 +1084,7 @@ userauth_hostbased_fromfile(LIBSSH2_SESSION *session, session->userauth_host_state = libssh2_NB_state_sent; } - if (session->userauth_host_state == libssh2_NB_state_sent) { + if(session->userauth_host_state == libssh2_NB_state_sent) { static const unsigned char reply_codes[3] = { SSH_MSG_USERAUTH_SUCCESS, SSH_MSG_USERAUTH_FAILURE, 0 }; size_t data_len; @@ -971,17 +1093,18 @@ 
userauth_hostbased_fromfile(LIBSSH2_SESSION *session, &data_len, 0, NULL, 0, &session-> userauth_host_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); } session->userauth_host_state = libssh2_NB_state_idle; - if (rc) { + if(rc || data_len < 1) { return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED, "Auth failed"); } - if (session->userauth_host_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_host_data[0] == SSH_MSG_USERAUTH_SUCCESS) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Hostbased authentication successful"); /* We are us and we've proved it. */ @@ -1024,7 +1147,160 @@ libssh2_userauth_hostbased_fromfile_ex(LIBSSH2_SESSION *session, return rc; } +static int plain_method_len(const char *method, size_t method_len) +{ + if(!strncmp("ecdsa-sha2-nistp256-cert-v01@openssh.com", + method, + method_len) || + !strncmp("ecdsa-sha2-nistp384-cert-v01@openssh.com", + method, + method_len) || + !strncmp("ecdsa-sha2-nistp521-cert-v01@openssh.com", + method, + method_len)) { + return 19; + } + return method_len; +} + +/** + * @function _libssh2_key_sign_algorithm + * @abstract Upgrades the algorithm used for public key signing RFC 8332 + * @discussion Based on the incoming key_method value, this function + * will upgrade the key method input based on user preferences, + * server support algos and crypto backend support + * @related _libssh2_supported_key_sign_algorithms() + * @param key_method current key method, usually the default key sig method + * @param key_method_len length of the key method buffer + * @result error code or zero on success + */ + +static int +_libssh2_key_sign_algorithm(LIBSSH2_SESSION *session, + unsigned char **key_method, + size_t *key_method_len) +{ + const char *s = NULL; + const char *a = NULL; + const char *match = NULL; + const 
char *p = NULL; + const char *f = NULL; + char *i = NULL; + int p_len = 0; + int f_len = 0; + int rc = 0; + int match_len = 0; + char *filtered_algs = NULL; + + const char *supported_algs = + _libssh2_supported_key_sign_algorithms(session, + *key_method, + *key_method_len); + + if(supported_algs == NULL || session->server_sign_algorithms == NULL) { + /* no upgrading key algorithm supported, do nothing */ + return LIBSSH2_ERROR_NONE; + } + + filtered_algs = LIBSSH2_ALLOC(session, strlen(supported_algs) + 1); + if(!filtered_algs) { + rc = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate filtered algs"); + return rc; + } + + s = session->server_sign_algorithms; + i = filtered_algs; + /* this walks the server algo list and the supported algo list and creates + a filtered list that includes matches */ + + while(s && *s) { + p = strchr(s, ','); + p_len = p ? (p - s) : (int) strlen(s); + a = supported_algs; + + while(a && *a) { + f = strchr(a, ','); + f_len = f ? (f - a) : (int) strlen(a); + + if(f_len == p_len && memcmp(a, s, p_len) == 0) { + + if(i != filtered_algs) { + memcpy(i, ",", 1); + i += 1; + } + + memcpy(i, s, p_len); + i += p_len; + } + + a = f ? (f + 1) : NULL; + } + + s = p ? (p + 1) : NULL; + } + + filtered_algs[i - filtered_algs] = '\0'; + + if(session->sign_algo_prefs) { + s = session->sign_algo_prefs; + } + else { + s = supported_algs; + } + + /* now that we have the possible supported algos, match based on the prefs + or what is supported by the crypto backend, look for a match */ + + while(s && *s && !match) { + p = strchr(s, ','); + p_len = p ? (p - s) : (int) strlen(s); + a = filtered_algs; + + while(a && *a && !match) { + f = strchr(a, ','); + f_len = f ? (f - a) : (int) strlen(a); + + if(f_len == p_len && memcmp(a, s, p_len) == 0) { + /* found a match, upgrade key method */ + match = s; + match_len = p_len; + } + else { + a = f ? (f + 1) : NULL; + } + } + + s = p ? 
(p + 1) : NULL; + } + + if(match != NULL) { + if(*key_method) + LIBSSH2_FREE(session, *key_method); + + *key_method = LIBSSH2_ALLOC(session, match_len); + if(key_method) { + memcpy(*key_method, match, match_len); + *key_method_len = match_len; + } + else { + *key_method_len = 0; + rc = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate key method upgrade"); + } + } + else { + /* no match was found */ + rc = _libssh2_error(session, LIBSSH2_ERROR_METHOD_NONE, + "No signing signature matched"); + } + + if(filtered_algs) + LIBSSH2_FREE(session, filtered_algs); + + return rc; +} int _libssh2_userauth_publickey(LIBSSH2_SESSION *session, @@ -1032,7 +1308,8 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, unsigned int username_len, const unsigned char *pubkeydata, unsigned long pubkeydata_len, - LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC((*sign_callback)), + LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC + ((*sign_callback)), void *abstract) { unsigned char reply_codes[4] = @@ -1041,14 +1318,18 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, }; int rc; unsigned char *s; + int auth_attempts = 0; - if (session->userauth_pblc_state == libssh2_NB_state_idle) { + retry_auth: + auth_attempts++; + + if(session->userauth_pblc_state == libssh2_NB_state_idle) { /* * The call to _libssh2_ntohu32 later relies on pubkeydata having at * least 4 valid bytes containing the length of the method name. */ - if (pubkeydata_len < 4) + if(pubkeydata_len < 4) return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED, "Invalid public key, too short"); @@ -1062,10 +1343,10 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, * allocation/free. * For other uses, we allocate and populate it here. 
*/ - if (!session->userauth_pblc_method) { + if(!session->userauth_pblc_method) { session->userauth_pblc_method_len = _libssh2_ntohu32(pubkeydata); - if(session->userauth_pblc_method_len > pubkeydata_len) + if(session->userauth_pblc_method_len > pubkeydata_len - 4) /* the method length simply cannot be longer than the entire passed in data, so we use this to detect crazy input data */ @@ -1075,23 +1356,35 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_method = LIBSSH2_ALLOC(session, session->userauth_pblc_method_len); - if (!session->userauth_pblc_method) { + if(!session->userauth_pblc_method) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, - "Unable to allocate memory for public key " - "data"); + "Unable to allocate memory " + "for public key data"); } memcpy(session->userauth_pblc_method, pubkeydata + 4, session->userauth_pblc_method_len); } - /* - * The length of the method name read from plaintext prefix in the - * file must match length embedded in the key. - * TODO: The data should match too but we don't check that. Should we? 
- */ - else if (session->userauth_pblc_method_len != - _libssh2_ntohu32(pubkeydata)) - return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED, - "Invalid public key"); + + /* upgrade key signing algo if it is supported and + * it is our first auth attempt, otherwise fallback to + * the key default algo */ + if(auth_attempts == 1) { + rc = _libssh2_key_sign_algorithm(session, + &session->userauth_pblc_method, + &session->userauth_pblc_method_len); + + if(rc) + return rc; + } + + if(session->userauth_pblc_method_len && + session->userauth_pblc_method) { + _libssh2_debug(session, + LIBSSH2_TRACE_KEX, + "Signing using %.*s", + session->userauth_pblc_method_len, + session->userauth_pblc_method); + } /* * 45 = packet_type(1) + username_len(4) + servicename_len(4) + @@ -1117,7 +1410,7 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_packet_len + 4 + (4 + session->userauth_pblc_method_len) + (4 + pubkeydata_len)); - if (!session->userauth_pblc_packet) { + if(!session->userauth_pblc_packet) { LIBSSH2_FREE(session, session->userauth_pblc_method); session->userauth_pblc_method = NULL; return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, @@ -1143,13 +1436,14 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_state = libssh2_NB_state_created; } - if (session->userauth_pblc_state == libssh2_NB_state_created) { + if(session->userauth_pblc_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->userauth_pblc_packet, session->userauth_pblc_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - else if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); + else if(rc) { LIBSSH2_FREE(session, session->userauth_pblc_packet); session->userauth_pblc_packet = NULL; LIBSSH2_FREE(session, session->userauth_pblc_method); @@ -1162,17 +1456,18 @@ 
_libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_state = libssh2_NB_state_sent; } - if (session->userauth_pblc_state == libssh2_NB_state_sent) { + if(session->userauth_pblc_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &session->userauth_pblc_data, &session->userauth_pblc_data_len, 0, NULL, 0, &session-> userauth_pblc_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); } - else if (rc) { + else if(rc || (session->userauth_pblc_data_len < 1)) { LIBSSH2_FREE(session, session->userauth_pblc_packet); session->userauth_pblc_packet = NULL; LIBSSH2_FREE(session, session->userauth_pblc_method); @@ -1182,7 +1477,7 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, "Waiting for USERAUTH response"); } - if (session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_SUCCESS) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Pubkey authentication prematurely successful"); /* @@ -1200,7 +1495,7 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, return 0; } - if (session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_FAILURE) { + if(session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_FAILURE) { /* This public key is not allowed for this user on this server */ LIBSSH2_FREE(session, session->userauth_pblc_data); session->userauth_pblc_data = NULL; @@ -1221,14 +1516,14 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_state = libssh2_NB_state_sent1; } - if (session->userauth_pblc_state == libssh2_NB_state_sent1) { + if(session->userauth_pblc_state == libssh2_NB_state_sent1) { unsigned char *buf; unsigned char *sig; size_t sig_len; s = buf = LIBSSH2_ALLOC(session, 4 + session->session_id_len + session->userauth_pblc_packet_len); - if (!buf) { 
+ if(!buf) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "userauth-publickey signed data"); @@ -1237,15 +1532,28 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, _libssh2_store_str(&s, (const char *)session->session_id, session->session_id_len); - memcpy (s, session->userauth_pblc_packet, - session->userauth_pblc_packet_len); + memcpy(s, session->userauth_pblc_packet, + session->userauth_pblc_packet_len); s += session->userauth_pblc_packet_len; rc = sign_callback(session, &sig, &sig_len, buf, s - buf, abstract); LIBSSH2_FREE(session, buf); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - } else if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); + } + else if(rc == LIBSSH2_ERROR_ALGO_UNSUPPORTED && auth_attempts == 1) { + /* try again with the default key algo */ + LIBSSH2_FREE(session, session->userauth_pblc_method); + session->userauth_pblc_method = NULL; + LIBSSH2_FREE(session, session->userauth_pblc_packet); + session->userauth_pblc_packet = NULL; + session->userauth_pblc_state = libssh2_NB_state_idle; + + rc = LIBSSH2_ERROR_NONE; + goto retry_auth; + } + else if(rc) { LIBSSH2_FREE(session, session->userauth_pblc_method); session->userauth_pblc_method = NULL; LIBSSH2_FREE(session, session->userauth_pblc_packet); @@ -1259,7 +1567,7 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, * If this function was restarted, pubkeydata_len might still be 0 * which will cause an unnecessary but harmless realloc here. */ - if (sig_len > pubkeydata_len) { + if(sig_len > pubkeydata_len) { unsigned char *newpacket; /* Should *NEVER* happen, but...well.. 
better safe than sorry */ newpacket = LIBSSH2_REALLOC(session, @@ -1267,7 +1575,7 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_packet_len + 4 + (4 + session->userauth_pblc_method_len) + (4 + sig_len)); /* PK sigblob */ - if (!newpacket) { + if(!newpacket) { LIBSSH2_FREE(session, sig); LIBSSH2_FREE(session, session->userauth_pblc_packet); session->userauth_pblc_packet = NULL; @@ -1284,8 +1592,13 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, s = session->userauth_pblc_packet + session->userauth_pblc_packet_len; session->userauth_pblc_b = NULL; + session->userauth_pblc_method_len = + plain_method_len((const char *)session->userauth_pblc_method, + session->userauth_pblc_method_len); + _libssh2_store_u32(&s, - 4 + session->userauth_pblc_method_len + 4 + sig_len); + 4 + session->userauth_pblc_method_len + 4 + + sig_len); _libssh2_store_str(&s, (const char *)session->userauth_pblc_method, session->userauth_pblc_method_len); @@ -1302,14 +1615,16 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, session->userauth_pblc_state = libssh2_NB_state_sent2; } - if (session->userauth_pblc_state == libssh2_NB_state_sent2) { + if(session->userauth_pblc_state == libssh2_NB_state_sent2) { rc = _libssh2_transport_send(session, session->userauth_pblc_packet, session->userauth_pblc_s - session->userauth_pblc_packet, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - } else if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); + } + else if(rc) { LIBSSH2_FREE(session, session->userauth_pblc_packet); session->userauth_pblc_packet = NULL; session->userauth_pblc_state = libssh2_NB_state_idle; @@ -1326,19 +1641,20 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, reply_codes[2] = 0; rc = _libssh2_packet_requirev(session, reply_codes, - &session->userauth_pblc_data, - &session->userauth_pblc_data_len, 0, 
NULL, 0, - &session->userauth_pblc_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + &session->userauth_pblc_data, + &session->userauth_pblc_data_len, 0, NULL, 0, + &session->userauth_pblc_packet_requirev_state); + if(rc == LIBSSH2_ERROR_EAGAIN) { return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block requesting userauth list"); - } else if (rc) { + } + else if(rc || session->userauth_pblc_data_len < 1) { session->userauth_pblc_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED, "Waiting for publickey USERAUTH response"); } - if (session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_pblc_data[0] == SSH_MSG_USERAUTH_SUCCESS) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Publickey authentication successful"); /* We are us and we've proved it. */ @@ -1381,8 +1697,8 @@ userauth_publickey_frommemory(LIBSSH2_SESSION *session, privkey_file.filename = privatekeydata; privkey_file.passphrase = passphrase; - if (session->userauth_pblc_state == libssh2_NB_state_idle) { - if (publickeydata_len && publickeydata) { + if(session->userauth_pblc_state == libssh2_NB_state_idle) { + if(publickeydata_len && publickeydata) { rc = memory_read_publickey(session, &session->userauth_pblc_method, &session->userauth_pblc_method_len, &pubkeydata, &pubkeydata_len, @@ -1390,17 +1706,16 @@ userauth_publickey_frommemory(LIBSSH2_SESSION *session, if(rc) return rc; } - else if (privatekeydata_len && privatekeydata) { + else if(privatekeydata_len && privatekeydata) { /* Compute public key from private key. 
*/ - if (_libssh2_pub_priv_keyfilememory(session, - &session->userauth_pblc_method, - &session->userauth_pblc_method_len, - &pubkeydata, &pubkeydata_len, - privatekeydata, privatekeydata_len, - passphrase)) - return _libssh2_error(session, LIBSSH2_ERROR_FILE, - "Unable to extract public key " - "from private key."); + rc = _libssh2_pub_priv_keyfilememory(session, + &session->userauth_pblc_method, + &session->userauth_pblc_method_len, + &pubkeydata, &pubkeydata_len, + privatekeydata, privatekeydata_len, + passphrase); + if(rc) + return rc; } else { return _libssh2_error(session, LIBSSH2_ERROR_FILE, @@ -1438,12 +1753,12 @@ userauth_publickey_fromfile(LIBSSH2_SESSION *session, privkey_file.filename = privatekey; privkey_file.passphrase = passphrase; - if (session->userauth_pblc_state == libssh2_NB_state_idle) { - if (publickey) { + if(session->userauth_pblc_state == libssh2_NB_state_idle) { + if(publickey) { rc = file_read_publickey(session, &session->userauth_pblc_method, &session->userauth_pblc_method_len, - &pubkeydata, &pubkeydata_len,publickey); - if (rc) + &pubkeydata, &pubkeydata_len, publickey); + if(rc) return rc; } else { @@ -1455,7 +1770,7 @@ userauth_publickey_fromfile(LIBSSH2_SESSION *session, privatekey, passphrase); /* _libssh2_pub_priv_keyfile calls _libssh2_error() */ - if (rc) + if(rc) return rc; } } @@ -1487,7 +1802,7 @@ libssh2_userauth_publickey_frommemory(LIBSSH2_SESSION *session, if(NULL == passphrase) /* if given a NULL pointer, make it point to a zero-length string to save us from having to check this all over */ - passphrase=""; + passphrase = ""; BLOCK_ADJUST(rc, session, userauth_publickey_frommemory(session, user, user_len, @@ -1515,7 +1830,7 @@ libssh2_userauth_publickey_fromfile_ex(LIBSSH2_SESSION *session, if(NULL == passphrase) /* if given a NULL pointer, make it point to a zero-length string to save us from having to check this all over */ - passphrase=""; + passphrase = ""; BLOCK_ADJUST(rc, session, 
userauth_publickey_fromfile(session, user, user_len, @@ -1532,7 +1847,8 @@ libssh2_userauth_publickey(LIBSSH2_SESSION *session, const char *user, const unsigned char *pubkeydata, size_t pubkeydata_len, - LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC((*sign_callback)), + LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC + ((*sign_callback)), void **abstract) { int rc; @@ -1558,7 +1874,8 @@ static int userauth_keyboard_interactive(LIBSSH2_SESSION * session, const char *username, unsigned int username_len, - LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC((*response_callback))) + LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC + ((*response_callback))) { unsigned char *s; int rc; @@ -1570,7 +1887,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, unsigned int language_tag_len; unsigned int i; - if (session->userauth_kybd_state == libssh2_NB_state_idle) { + if(session->userauth_kybd_state == libssh2_NB_state_idle) { session->userauth_kybd_auth_name = NULL; session->userauth_kybd_auth_instruction = NULL; session->userauth_kybd_num_prompts = 0; @@ -1595,7 +1912,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, session->userauth_kybd_data = s = LIBSSH2_ALLOC(session, session->userauth_kybd_packet_len); - if (!s) { + if(!s) { return _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive authentication"); @@ -1624,18 +1941,21 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, session->userauth_kybd_state = libssh2_NB_state_created; } - if (session->userauth_kybd_state == libssh2_NB_state_created) { + if(session->userauth_kybd_state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, session->userauth_kybd_data, session->userauth_kybd_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) { - return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - } else if (rc) { + if(rc == LIBSSH2_ERROR_EAGAIN) { + return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, + "Would block"); + } + else if(rc) { 
LIBSSH2_FREE(session, session->userauth_kybd_data); session->userauth_kybd_data = NULL; session->userauth_kybd_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, - "Unable to send keyboard-interactive request"); + "Unable to send keyboard-interactive" + " request"); } LIBSSH2_FREE(session, session->userauth_kybd_data); session->userauth_kybd_data = NULL; @@ -1644,26 +1964,29 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, } for(;;) { - if (session->userauth_kybd_state == libssh2_NB_state_sent) { + if(session->userauth_kybd_state == libssh2_NB_state_sent) { rc = _libssh2_packet_requirev(session, reply_codes, &session->userauth_kybd_data, &session->userauth_kybd_data_len, 0, NULL, 0, &session-> userauth_kybd_packet_requirev_state); - if (rc == LIBSSH2_ERROR_EAGAIN) { + if(rc == LIBSSH2_ERROR_EAGAIN) { return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - } else if (rc) { + } + else if(rc || session->userauth_kybd_data_len < 1) { session->userauth_kybd_state = libssh2_NB_state_idle; return _libssh2_error(session, LIBSSH2_ERROR_AUTHENTICATION_FAILED, - "Waiting for keyboard USERAUTH response"); + "Waiting for keyboard " + "USERAUTH response"); } - if (session->userauth_kybd_data[0] == SSH_MSG_USERAUTH_SUCCESS) { + if(session->userauth_kybd_data[0] == SSH_MSG_USERAUTH_SUCCESS) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, - "Keyboard-interactive authentication successful"); + "Keyboard-interactive " + "authentication successful"); LIBSSH2_FREE(session, session->userauth_kybd_data); session->userauth_kybd_data = NULL; session->state |= LIBSSH2_STATE_AUTHENTICATED; @@ -1671,7 +1994,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, return 0; } - if (session->userauth_kybd_data[0] == SSH_MSG_USERAUTH_FAILURE) { + if(session->userauth_kybd_data[0] == SSH_MSG_USERAUTH_FAILURE) { _libssh2_debug(session, LIBSSH2_TRACE_AUTH, "Keyboard-interactive authentication failed"); LIBSSH2_FREE(session, 
session->userauth_kybd_data); @@ -1686,61 +2009,135 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, /* server requested PAM-like conversation */ s = session->userauth_kybd_data + 1; - /* string name (ISO-10646 UTF-8) */ - session->userauth_kybd_auth_name_len = _libssh2_ntohu32(s); - s += 4; + if(session->userauth_kybd_data_len >= 5) { + /* string name (ISO-10646 UTF-8) */ + session->userauth_kybd_auth_name_len = _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "to get length"); + goto cleanup; + } + if(session->userauth_kybd_auth_name_len) { session->userauth_kybd_auth_name = LIBSSH2_ALLOC(session, session->userauth_kybd_auth_name_len); - if (!session->userauth_kybd_auth_name) { + if(!session->userauth_kybd_auth_name) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive 'name' " "request field"); goto cleanup; } - memcpy(session->userauth_kybd_auth_name, s, - session->userauth_kybd_auth_name_len); - s += session->userauth_kybd_auth_name_len; + if(s + session->userauth_list_data_len <= + session->userauth_kybd_data + + session->userauth_kybd_data_len) { + memcpy(session->userauth_kybd_auth_name, s, + session->userauth_kybd_auth_name_len); + s += session->userauth_kybd_auth_name_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth name"); + goto cleanup; + } + } + + if(s + 4 <= session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* string instruction (ISO-10646 UTF-8) */ + session->userauth_kybd_auth_instruction_len = + _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth instruction length"); + goto cleanup; } - /* string instruction (ISO-10646 UTF-8) */ - session->userauth_kybd_auth_instruction_len = 
_libssh2_ntohu32(s); - s += 4; if(session->userauth_kybd_auth_instruction_len) { session->userauth_kybd_auth_instruction = LIBSSH2_ALLOC(session, session->userauth_kybd_auth_instruction_len); - if (!session->userauth_kybd_auth_instruction) { + if(!session->userauth_kybd_auth_instruction) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive 'instruction' " "request field"); goto cleanup; } - memcpy(session->userauth_kybd_auth_instruction, s, - session->userauth_kybd_auth_instruction_len); - s += session->userauth_kybd_auth_instruction_len; + if(s + session->userauth_kybd_auth_instruction_len <= + session->userauth_kybd_data + + session->userauth_kybd_data_len) { + memcpy(session->userauth_kybd_auth_instruction, s, + session->userauth_kybd_auth_instruction_len); + s += session->userauth_kybd_auth_instruction_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth instruction"); + goto cleanup; + } } - /* string language tag (as defined in [RFC-3066]) */ - language_tag_len = _libssh2_ntohu32(s); - s += 4; + if(s + 4 <= session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* string language tag (as defined in [RFC-3066]) */ + language_tag_len = _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth language tag length"); + goto cleanup; + } - /* ignoring this field as deprecated */ - s += language_tag_len; + if(s + language_tag_len <= session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* ignoring this field as deprecated */ + s += language_tag_len; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth language tag"); + goto cleanup; + } - /* int num-prompts */ - session->userauth_kybd_num_prompts = _libssh2_ntohu32(s); - s += 4; + if(s + 4 
<= session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* int num-prompts */ + session->userauth_kybd_num_prompts = _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too small" + "for auth num keyboard prompts"); + goto cleanup; + } + + if(session->userauth_kybd_num_prompts > 100) { + _libssh2_error(session, LIBSSH2_ERROR_OUT_OF_BOUNDARY, + "Too many replies for " + "keyboard-interactive prompts"); + goto cleanup; + } if(session->userauth_kybd_num_prompts) { session->userauth_kybd_prompts = LIBSSH2_CALLOC(session, sizeof(LIBSSH2_USERAUTH_KBDINT_PROMPT) * session->userauth_kybd_num_prompts); - if (!session->userauth_kybd_prompts) { + if(!session->userauth_kybd_prompts) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive prompts array"); @@ -1751,7 +2148,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, LIBSSH2_CALLOC(session, sizeof(LIBSSH2_USERAUTH_KBDINT_RESPONSE) * session->userauth_kybd_num_prompts); - if (!session->userauth_kybd_responses) { + if(!session->userauth_kybd_responses) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive responses array"); @@ -1759,25 +2156,56 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, } for(i = 0; i < session->userauth_kybd_num_prompts; i++) { - /* string prompt[1] (ISO-10646 UTF-8) */ - session->userauth_kybd_prompts[i].length = - _libssh2_ntohu32(s); - s += 4; + if(s + 4 <= session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* string prompt[1] (ISO-10646 UTF-8) */ + session->userauth_kybd_prompts[i].length = + _libssh2_ntohu32(s); + s += 4; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too " + "small for auth keyboard " + "prompt length"); + goto cleanup; + } + session->userauth_kybd_prompts[i].text = LIBSSH2_CALLOC(session, - 
session->userauth_kybd_prompts[i].length); - if (!session->userauth_kybd_prompts[i].text) { + session->userauth_kybd_prompts[i]. + length); + if(!session->userauth_kybd_prompts[i].text) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for " "keyboard-interactive prompt message"); goto cleanup; } - memcpy(session->userauth_kybd_prompts[i].text, s, - session->userauth_kybd_prompts[i].length); - s += session->userauth_kybd_prompts[i].length; - /* boolean echo[1] */ - session->userauth_kybd_prompts[i].echo = *s++; + if(s + session->userauth_kybd_prompts[i].length <= + session->userauth_kybd_data + + session->userauth_kybd_data_len) { + memcpy(session->userauth_kybd_prompts[i].text, s, + session->userauth_kybd_prompts[i].length); + s += session->userauth_kybd_prompts[i].length; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too " + "small for auth keyboard prompt"); + goto cleanup; + } + if(s < session->userauth_kybd_data + + session->userauth_kybd_data_len) { + /* boolean echo[1] */ + session->userauth_kybd_prompts[i].echo = *s++; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_BUFFER_TOO_SMALL, + "userauth keyboard data buffer too " + "small for auth keyboard prompt echo"); + goto cleanup; + } } } @@ -1801,8 +2229,17 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, for(i = 0; i < session->userauth_kybd_num_prompts; i++) { /* string response[1] (ISO-10646 UTF-8) */ - session->userauth_kybd_packet_len += - 4 + session->userauth_kybd_responses[i].length; + if(session->userauth_kybd_responses[i].length <= + (SIZE_MAX - 4 - session->userauth_kybd_packet_len) ) { + session->userauth_kybd_packet_len += + 4 + session->userauth_kybd_responses[i].length; + } + else { + _libssh2_error(session, LIBSSH2_ERROR_ALLOC, + "Unable to allocate memory for keyboard-" + "interactive response packet"); + goto cleanup; + } } /* A new userauth_kybd_data area is to be allocated, free the @@ 
-1811,7 +2248,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, session->userauth_kybd_data = s = LIBSSH2_ALLOC(session, session->userauth_kybd_packet_len); - if (!s) { + if(!s) { _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for keyboard-" "interactive response packet"); @@ -1831,14 +2268,14 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, session->userauth_kybd_state = libssh2_NB_state_sent1; } - if (session->userauth_kybd_state == libssh2_NB_state_sent1) { + if(session->userauth_kybd_state == libssh2_NB_state_sent1) { rc = _libssh2_transport_send(session, session->userauth_kybd_data, session->userauth_kybd_packet_len, NULL, 0); - if (rc == LIBSSH2_ERROR_EAGAIN) + if(rc == LIBSSH2_ERROR_EAGAIN) return _libssh2_error(session, LIBSSH2_ERROR_EAGAIN, "Would block"); - if (rc) { + if(rc) { _libssh2_error(session, LIBSSH2_ERROR_SOCKET_SEND, "Unable to send userauth-keyboard-interactive" " request"); @@ -1857,14 +2294,14 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, LIBSSH2_FREE(session, session->userauth_kybd_data); session->userauth_kybd_data = NULL; - if (session->userauth_kybd_prompts) { + if(session->userauth_kybd_prompts) { for(i = 0; i < session->userauth_kybd_num_prompts; i++) { LIBSSH2_FREE(session, session->userauth_kybd_prompts[i].text); session->userauth_kybd_prompts[i].text = NULL; } } - if (session->userauth_kybd_responses) { + if(session->userauth_kybd_responses) { for(i = 0; i < session->userauth_kybd_num_prompts; i++) { LIBSSH2_FREE(session, session->userauth_kybd_responses[i].text); @@ -1889,7 +2326,7 @@ userauth_keyboard_interactive(LIBSSH2_SESSION * session, session->userauth_kybd_auth_instruction = NULL; } - if (session->userauth_kybd_auth_failure) { + if(session->userauth_kybd_auth_failure) { session->userauth_kybd_state = libssh2_NB_state_idle; return -1; } @@ -1907,7 +2344,8 @@ LIBSSH2_API int libssh2_userauth_keyboard_interactive_ex(LIBSSH2_SESSION *session, const char *user, 
unsigned int user_len, - LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC((*response_callback))) + LIBSSH2_USERAUTH_KBDINT_RESPONSE_FUNC + ((*response_callback))) { int rc; BLOCK_ADJUST(rc, session, diff --git a/vendor/libssh2/src/userauth.h b/vendor/libssh2/src/userauth.h index c0442ae158..6b402ddbf9 100644 --- a/vendor/libssh2/src/userauth.h +++ b/vendor/libssh2/src/userauth.h @@ -1,5 +1,5 @@ -#ifndef LIBSSH2_USERAUTH_H -#define LIBSSH2_USERAUTH_H +#ifndef __LIBSSH2_USERAUTH_H +#define __LIBSSH2_USERAUTH_H /* Copyright (c) 2004-2007, Sara Golemon * Copyright (c) 2009-2010 by Daniel Stenberg * All rights reserved. @@ -44,7 +44,8 @@ _libssh2_userauth_publickey(LIBSSH2_SESSION *session, unsigned int username_len, const unsigned char *pubkeydata, unsigned long pubkeydata_len, - LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC((*sign_callback)), + LIBSSH2_USERAUTH_PUBLICKEY_SIGN_FUNC + ((*sign_callback)), void *abstract); -#endif /* LIBSSH2_USERAUTH_H */ +#endif /* __LIBSSH2_USERAUTH_H */ diff --git a/vendor/libssh2/src/wincng.c b/vendor/libssh2/src/wincng.c index d3271b3e33..654f50db09 100755 --- a/vendor/libssh2/src/wincng.c +++ b/vendor/libssh2/src/wincng.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 Marc Hoersken + * Copyright (C) 2013-2020 Marc Hoersken * All rights reserved. 
* * Redistribution and use in source and binary forms, @@ -58,7 +58,9 @@ #include #include +#include #include +#include "misc.h" #ifdef HAVE_STDLIB_H #include @@ -97,6 +99,10 @@ #define BCRYPT_SHA256_ALGORITHM L"SHA256" #endif +#ifndef BCRYPT_SHA384_ALGORITHM +#define BCRYPT_SHA384_ALGORITHM L"SHA384" +#endif + #ifndef BCRYPT_SHA512_ALGORITHM #define BCRYPT_SHA512_ALGORITHM L"SHA512" #endif @@ -121,6 +127,15 @@ #define BCRYPT_3DES_ALGORITHM L"3DES" #endif +#ifndef BCRYPT_DH_ALGORITHM +#define BCRYPT_DH_ALGORITHM L"DH" +#endif + +/* BCRYPT_KDF_RAW_SECRET is available from Windows 8.1 and onwards */ +#ifndef BCRYPT_KDF_RAW_SECRET +#define BCRYPT_KDF_RAW_SECRET L"TRUNCATE" +#endif + #ifndef BCRYPT_ALG_HANDLE_HMAC_FLAG #define BCRYPT_ALG_HANDLE_HMAC_FLAG 0x00000008 #endif @@ -207,92 +222,194 @@ * Windows CNG backend: Generic functions */ +struct _libssh2_wincng_ctx _libssh2_wincng; + void _libssh2_wincng_init(void) { int ret; - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgRNG, + memset(&_libssh2_wincng, 0, sizeof(_libssh2_wincng)); + + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgRNG, BCRYPT_RNG_ALGORITHM, NULL, 0); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgRNG = NULL; + } - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashMD5, + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashMD5, BCRYPT_MD5_ALGORITHM, NULL, 0); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA1, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHashMD5 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA1, BCRYPT_SHA1_ALGORITHM, NULL, 0); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA256, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHashSHA1 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA256, BCRYPT_SHA256_ALGORITHM, NULL, 0); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA512, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHashSHA256 = 
NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA384, + BCRYPT_SHA384_ALGORITHM, NULL, 0); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHashSHA384 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHashSHA512, BCRYPT_SHA512_ALGORITHM, NULL, 0); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHashSHA512 = NULL; + } - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacMD5, + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacMD5, BCRYPT_MD5_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA1, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHmacMD5 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA1, BCRYPT_SHA1_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA256, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHmacSHA1 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA256, BCRYPT_SHA256_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA512, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHmacSHA256 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA384, + BCRYPT_SHA384_ALGORITHM, NULL, + BCRYPT_ALG_HANDLE_HMAC_FLAG); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHmacSHA384 = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgHmacSHA512, BCRYPT_SHA512_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgHmacSHA512 = NULL; + } - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgRSA, + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgRSA, BCRYPT_RSA_ALGORITHM, NULL, 0); - (void)BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgDSA, + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgRSA = NULL; + } + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgDSA, 
BCRYPT_DSA_ALGORITHM, NULL, 0); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgDSA = NULL; + } ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgAES_CBC, BCRYPT_AES_ALGORITHM, NULL, 0); - if (BCRYPT_SUCCESS(ret)) { - ret = BCryptSetProperty(_libssh2_wincng.hAlgAES_CBC, BCRYPT_CHAINING_MODE, + if(BCRYPT_SUCCESS(ret)) { + ret = BCryptSetProperty(_libssh2_wincng.hAlgAES_CBC, + BCRYPT_CHAINING_MODE, (PBYTE)BCRYPT_CHAIN_MODE_CBC, sizeof(BCRYPT_CHAIN_MODE_CBC), 0); - if (!BCRYPT_SUCCESS(ret)) { - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgAES_CBC, 0); + if(!BCRYPT_SUCCESS(ret)) { + ret = BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgAES_CBC, 0); + if(BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgAES_CBC = NULL; + } + } + } + + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgAES_ECB, + BCRYPT_AES_ALGORITHM, NULL, 0); + if(BCRYPT_SUCCESS(ret)) { + ret = BCryptSetProperty(_libssh2_wincng.hAlgAES_ECB, + BCRYPT_CHAINING_MODE, + (PBYTE)BCRYPT_CHAIN_MODE_ECB, + sizeof(BCRYPT_CHAIN_MODE_ECB), 0); + if(!BCRYPT_SUCCESS(ret)) { + ret = BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgAES_ECB, 0); + if(BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgAES_ECB = NULL; + } } } ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgRC4_NA, BCRYPT_RC4_ALGORITHM, NULL, 0); - if (BCRYPT_SUCCESS(ret)) { - ret = BCryptSetProperty(_libssh2_wincng.hAlgRC4_NA, BCRYPT_CHAINING_MODE, + if(BCRYPT_SUCCESS(ret)) { + ret = BCryptSetProperty(_libssh2_wincng.hAlgRC4_NA, + BCRYPT_CHAINING_MODE, (PBYTE)BCRYPT_CHAIN_MODE_NA, sizeof(BCRYPT_CHAIN_MODE_NA), 0); - if (!BCRYPT_SUCCESS(ret)) { - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRC4_NA, 0); + if(!BCRYPT_SUCCESS(ret)) { + ret = BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRC4_NA, 0); + if(BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgRC4_NA = NULL; + } } } ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlg3DES_CBC, BCRYPT_3DES_ALGORITHM, NULL, 0); - if (BCRYPT_SUCCESS(ret)) { - ret = 
BCryptSetProperty(_libssh2_wincng.hAlg3DES_CBC, BCRYPT_CHAINING_MODE, + if(BCRYPT_SUCCESS(ret)) { + ret = BCryptSetProperty(_libssh2_wincng.hAlg3DES_CBC, + BCRYPT_CHAINING_MODE, (PBYTE)BCRYPT_CHAIN_MODE_CBC, sizeof(BCRYPT_CHAIN_MODE_CBC), 0); - if (!BCRYPT_SUCCESS(ret)) { - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlg3DES_CBC, 0); + if(!BCRYPT_SUCCESS(ret)) { + ret = BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlg3DES_CBC, + 0); + if(BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlg3DES_CBC = NULL; + } } } + + ret = BCryptOpenAlgorithmProvider(&_libssh2_wincng.hAlgDH, + BCRYPT_DH_ALGORITHM, NULL, 0); + if(!BCRYPT_SUCCESS(ret)) { + _libssh2_wincng.hAlgDH = NULL; + } } void _libssh2_wincng_free(void) { - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRNG, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashMD5, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA1, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA256, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA512, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacMD5, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA1, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA256, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA512, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRSA, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgDSA, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgAES_CBC, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRC4_NA, 0); - (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlg3DES_CBC, 0); + if(_libssh2_wincng.hAlgRNG) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRNG, 0); + if(_libssh2_wincng.hAlgHashMD5) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashMD5, 0); + if(_libssh2_wincng.hAlgHashSHA1) + 
(void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA1, 0); + if(_libssh2_wincng.hAlgHashSHA256) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA256, 0); + if(_libssh2_wincng.hAlgHashSHA384) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA384, 0); + if(_libssh2_wincng.hAlgHashSHA512) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHashSHA512, 0); + if(_libssh2_wincng.hAlgHmacMD5) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacMD5, 0); + if(_libssh2_wincng.hAlgHmacSHA1) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA1, 0); + if(_libssh2_wincng.hAlgHmacSHA256) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA256, 0); + if(_libssh2_wincng.hAlgHmacSHA384) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA384, 0); + if(_libssh2_wincng.hAlgHmacSHA512) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgHmacSHA512, 0); + if(_libssh2_wincng.hAlgRSA) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRSA, 0); + if(_libssh2_wincng.hAlgDSA) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgDSA, 0); + if(_libssh2_wincng.hAlgAES_CBC) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgAES_CBC, 0); + if(_libssh2_wincng.hAlgRC4_NA) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgRC4_NA, 0); + if(_libssh2_wincng.hAlg3DES_CBC) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlg3DES_CBC, 0); + if(_libssh2_wincng.hAlgDH) + (void)BCryptCloseAlgorithmProvider(_libssh2_wincng.hAlgDH, 0); memset(&_libssh2_wincng, 0, sizeof(_libssh2_wincng)); } @@ -314,17 +431,35 @@ _libssh2_wincng_safe_free(void *buf, int len) (void)len; #endif - if (!buf) + if(!buf) return; #ifdef LIBSSH2_CLEAR_MEMORY - if (len > 0) + if(len > 0) SecureZeroMemory(buf, len); #endif free(buf); } +/* Copy a big endian set of bits from src to dest. 
+ * if the size of src is smaller than dest then pad the "left" (MSB) + * end with zeroes and copy the bits into the "right" (LSB) end. */ +static void +memcpy_with_be_padding(unsigned char *dest, unsigned long dest_len, + unsigned char *src, unsigned long src_len) +{ + if(dest_len > src_len) { + memset(dest, 0, dest_len - src_len); + } + memcpy((dest + dest_len) - src_len, src, src_len); +} + +static int +round_down(int number, int multiple) +{ + return (number / multiple) * multiple; +} /*******************************************************************/ /* @@ -345,7 +480,7 @@ _libssh2_wincng_hash_init(_libssh2_wincng_hash_ctx *ctx, (unsigned char *)&dwHash, sizeof(dwHash), &cbData, 0); - if ((!BCRYPT_SUCCESS(ret)) || dwHash != hashlen) { + if((!BCRYPT_SUCCESS(ret)) || dwHash != hashlen) { return -1; } @@ -353,12 +488,12 @@ _libssh2_wincng_hash_init(_libssh2_wincng_hash_ctx *ctx, (unsigned char *)&dwHashObject, sizeof(dwHashObject), &cbData, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { return -1; } pbHashObject = malloc(dwHashObject); - if (!pbHashObject) { + if(!pbHashObject) { return -1; } @@ -366,7 +501,7 @@ _libssh2_wincng_hash_init(_libssh2_wincng_hash_ctx *ctx, ret = BCryptCreateHash(hAlg, &hHash, pbHashObject, dwHashObject, key, keylen, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { _libssh2_wincng_safe_free(pbHashObject, dwHashObject); return -1; } @@ -418,7 +553,7 @@ _libssh2_wincng_hash(unsigned char *data, unsigned long datalen, int ret; ret = _libssh2_wincng_hash_init(&ctx, hAlg, hashlen, NULL, 0); - if (!ret) { + if(!ret) { ret = _libssh2_wincng_hash_update(&ctx, data, datalen); ret |= _libssh2_wincng_hash_final(&ctx, hash); } @@ -476,13 +611,13 @@ _libssh2_wincng_key_sha1_verify(_libssh2_wincng_key_ctx *ctx, datalen = m_len; data = malloc(datalen); - if (!data) { + if(!data) { return -1; } hashlen = SHA_DIGEST_LENGTH; hash = malloc(hashlen); - if (!hash) { + if(!hash) { free(data); return -1; } @@ -495,22 +630,23 
@@ _libssh2_wincng_key_sha1_verify(_libssh2_wincng_key_ctx *ctx, _libssh2_wincng_safe_free(data, datalen); - if (ret) { + if(ret) { _libssh2_wincng_safe_free(hash, hashlen); return -1; } datalen = sig_len; data = malloc(datalen); - if (!data) { + if(!data) { _libssh2_wincng_safe_free(hash, hashlen); return -1; } - if (flags & BCRYPT_PAD_PKCS1) { + if(flags & BCRYPT_PAD_PKCS1) { paddingInfoPKCS1.pszAlgId = BCRYPT_SHA1_ALGORITHM; pPaddingInfo = &paddingInfoPKCS1; - } else + } + else pPaddingInfo = NULL; memcpy(data, sig, datalen); @@ -537,14 +673,13 @@ _libssh2_wincng_load_pem(LIBSSH2_SESSION *session, FILE *fp; int ret; - (void)passphrase; - - fp = fopen(filename, "r"); - if (!fp) { + fp = fopen(filename, FOPEN_READTEXT); + if(!fp) { return -1; } ret = _libssh2_pem_parse(session, headerbegin, headerend, + passphrase, fp, data, datalen); fclose(fp); @@ -564,19 +699,19 @@ _libssh2_wincng_load_private(LIBSSH2_SESSION *session, unsigned int datalen = 0; int ret = -1; - if (ret && tryLoadRSA) { + if(ret && tryLoadRSA) { ret = _libssh2_wincng_load_pem(session, filename, passphrase, PEM_RSA_HEADER, PEM_RSA_FOOTER, &data, &datalen); } - if (ret && tryLoadDSA) { + if(ret && tryLoadDSA) { ret = _libssh2_wincng_load_pem(session, filename, passphrase, PEM_DSA_HEADER, PEM_DSA_FOOTER, &data, &datalen); } - if (!ret) { + if(!ret) { *ppbEncoded = data; *pcbEncoded = datalen; } @@ -599,21 +734,21 @@ _libssh2_wincng_load_private_memory(LIBSSH2_SESSION *session, (void)passphrase; - if (ret && tryLoadRSA) { + if(ret && tryLoadRSA) { ret = _libssh2_pem_parse_memory(session, PEM_RSA_HEADER, PEM_RSA_FOOTER, privatekeydata, privatekeydata_len, &data, &datalen); } - if (ret && tryLoadDSA) { + if(ret && tryLoadDSA) { ret = _libssh2_pem_parse_memory(session, PEM_DSA_HEADER, PEM_DSA_FOOTER, privatekeydata, privatekeydata_len, &data, &datalen); } - if (!ret) { + if(!ret) { *ppbEncoded = data; *pcbEncoded = datalen; } @@ -636,12 +771,12 @@ _libssh2_wincng_asn_decode(unsigned char *pbEncoded, 
lpszStructType, pbEncoded, cbEncoded, 0, NULL, NULL, &cbDecoded); - if (!ret) { + if(!ret) { return -1; } pbDecoded = malloc(cbDecoded); - if (!pbDecoded) { + if(!pbDecoded) { return -1; } @@ -649,7 +784,7 @@ _libssh2_wincng_asn_decode(unsigned char *pbEncoded, lpszStructType, pbEncoded, cbEncoded, 0, NULL, pbDecoded, &cbDecoded); - if (!ret) { + if(!ret) { _libssh2_wincng_safe_free(pbDecoded, cbDecoded); return -1; } @@ -670,25 +805,25 @@ _libssh2_wincng_bn_ltob(unsigned char *pbInput, unsigned char *pbOutput; unsigned long cbOutput, index, offset, length; - if (cbInput < 1) { + if(cbInput < 1) { return 0; } offset = 0; length = cbInput - 1; cbOutput = cbInput; - if (pbInput[length] & (1 << 7)) { + if(pbInput[length] & (1 << 7)) { offset++; cbOutput += offset; } pbOutput = (unsigned char *)malloc(cbOutput); - if (!pbOutput) { + if(!pbOutput) { return -1; } pbOutput[0] = 0; - for (index = 0; ((index + offset) < cbOutput) + for(index = 0; ((index + offset) < cbOutput) && (index < cbInput); index++) { pbOutput[index + offset] = pbInput[length - index]; } @@ -713,11 +848,11 @@ _libssh2_wincng_asn_decode_bn(unsigned char *pbEncoded, ret = _libssh2_wincng_asn_decode(pbEncoded, cbEncoded, X509_MULTI_BYTE_UINT, &pbInteger, &cbInteger); - if (!ret) { + if(!ret) { ret = _libssh2_wincng_bn_ltob(((PCRYPT_DATA_BLOB)pbInteger)->pbData, ((PCRYPT_DATA_BLOB)pbInteger)->cbData, &pbDecoded, &cbDecoded); - if (!ret) { + if(!ret) { *ppbDecoded = pbDecoded; *pcbDecoded = cbDecoded; } @@ -742,30 +877,31 @@ _libssh2_wincng_asn_decode_bns(unsigned char *pbEncoded, ret = _libssh2_wincng_asn_decode(pbEncoded, cbEncoded, X509_SEQUENCE_OF_ANY, &pbDecoded, &cbDecoded); - if (!ret) { + if(!ret) { length = ((PCRYPT_DATA_BLOB)pbDecoded)->cbData; rpbDecoded = malloc(sizeof(PBYTE) * length); - if (rpbDecoded) { + if(rpbDecoded) { rcbDecoded = malloc(sizeof(DWORD) * length); - if (rcbDecoded) { - for (index = 0; index < length; index++) { + if(rcbDecoded) { + for(index = 0; index < length; index++) 
{ pBlob = &((PCRYPT_DER_BLOB) ((PCRYPT_DATA_BLOB)pbDecoded)->pbData)[index]; ret = _libssh2_wincng_asn_decode_bn(pBlob->pbData, pBlob->cbData, &rpbDecoded[index], &rcbDecoded[index]); - if (ret) + if(ret) break; } - if (!ret) { + if(!ret) { *prpbDecoded = rpbDecoded; *prcbDecoded = rcbDecoded; *pcbCount = length; - } else { - for (length = 0; length < index; length++) { + } + else { + for(length = 0; length < index; length++) { _libssh2_wincng_safe_free(rpbDecoded[length], rcbDecoded[length]); rpbDecoded[length] = NULL; @@ -774,11 +910,13 @@ _libssh2_wincng_asn_decode_bns(unsigned char *pbEncoded, free(rpbDecoded); free(rcbDecoded); } - } else { + } + else { free(rpbDecoded); ret = -1; } - } else { + } + else { ret = -1; } @@ -795,13 +933,13 @@ _libssh2_wincng_bn_size(const unsigned char *bignum, { unsigned long offset; - if (!bignum) + if(!bignum) return 0; length--; offset = 0; - while (!(*(bignum + offset)) && (offset < length)) + while(!(*(bignum + offset)) && (offset < length)) offset++; length++; @@ -845,7 +983,7 @@ _libssh2_wincng_rsa_new(libssh2_rsa_ctx **rsa, _libssh2_wincng_bn_size(ddata, dlen)); offset = sizeof(BCRYPT_RSAKEY_BLOB); keylen = offset + elen + mlen; - if (ddata && dlen > 0) { + if(ddata && dlen > 0) { p1len = max(_libssh2_wincng_bn_size(pdata, plen), _libssh2_wincng_bn_size(e1data, e1len)); p2len = max(_libssh2_wincng_bn_size(qdata, qlen), @@ -854,7 +992,7 @@ _libssh2_wincng_rsa_new(libssh2_rsa_ctx **rsa, } key = malloc(keylen); - if (!key) { + if(!key) { return -1; } @@ -870,45 +1008,45 @@ _libssh2_wincng_rsa_new(libssh2_rsa_ctx **rsa, memcpy(key + offset, edata, elen); offset += elen; - if (nlen < mlen) + if(nlen < mlen) memcpy(key + offset + mlen - nlen, ndata, nlen); else memcpy(key + offset, ndata + nlen - mlen, mlen); - if (ddata && dlen > 0) { + if(ddata && dlen > 0) { offset += mlen; - if (plen < p1len) + if(plen < p1len) memcpy(key + offset + p1len - plen, pdata, plen); else memcpy(key + offset, pdata + plen - p1len, p1len); offset 
+= p1len; - if (qlen < p2len) + if(qlen < p2len) memcpy(key + offset + p2len - qlen, qdata, qlen); else memcpy(key + offset, qdata + qlen - p2len, p2len); offset += p2len; - if (e1len < p1len) + if(e1len < p1len) memcpy(key + offset + p1len - e1len, e1data, e1len); else memcpy(key + offset, e1data + e1len - p1len, p1len); offset += p1len; - if (e2len < p2len) + if(e2len < p2len) memcpy(key + offset + p2len - e2len, e2data, e2len); else memcpy(key + offset, e2data + e2len - p2len, p2len); offset += p2len; - if (coefflen < p1len) + if(coefflen < p1len) memcpy(key + offset + p1len - coefflen, coeffdata, coefflen); else memcpy(key + offset, coeffdata + coefflen - p1len, p1len); offset += p1len; - if (dlen < mlen) + if(dlen < mlen) memcpy(key + offset + mlen - dlen, ddata, dlen); else memcpy(key + offset, ddata + dlen - mlen, mlen); @@ -917,7 +1055,8 @@ _libssh2_wincng_rsa_new(libssh2_rsa_ctx **rsa, rsakey->Magic = BCRYPT_RSAFULLPRIVATE_MAGIC; rsakey->cbPrime1 = p1len; rsakey->cbPrime2 = p2len; - } else { + } + else { lpszBlobType = BCRYPT_RSAPUBLIC_BLOB; rsakey->Magic = BCRYPT_RSAPUBLIC_MAGIC; rsakey->cbPrime1 = 0; @@ -927,14 +1066,14 @@ _libssh2_wincng_rsa_new(libssh2_rsa_ctx **rsa, ret = BCryptImportKeyPair(_libssh2_wincng.hAlgRSA, NULL, lpszBlobType, &hKey, key, keylen, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { _libssh2_wincng_safe_free(key, keylen); return -1; } *rsa = malloc(sizeof(libssh2_rsa_ctx)); - if (!(*rsa)) { + if(!(*rsa)) { BCryptDestroyKey(hKey); _libssh2_wincng_safe_free(key, keylen); return -1; @@ -967,7 +1106,7 @@ _libssh2_wincng_rsa_new_private_parse(libssh2_rsa_ctx **rsa, _libssh2_wincng_safe_free(pbEncoded, cbEncoded); - if (ret) { + if(ret) { return -1; } @@ -975,14 +1114,14 @@ _libssh2_wincng_rsa_new_private_parse(libssh2_rsa_ctx **rsa, ret = BCryptImportKeyPair(_libssh2_wincng.hAlgRSA, NULL, LEGACY_RSAPRIVATE_BLOB, &hKey, pbStructInfo, cbStructInfo, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { 
_libssh2_wincng_safe_free(pbStructInfo, cbStructInfo); return -1; } *rsa = malloc(sizeof(libssh2_rsa_ctx)); - if (!(*rsa)) { + if(!(*rsa)) { BCryptDestroyKey(hKey); _libssh2_wincng_safe_free(pbStructInfo, cbStructInfo); return -1; @@ -1012,7 +1151,7 @@ _libssh2_wincng_rsa_new_private(libssh2_rsa_ctx **rsa, ret = _libssh2_wincng_load_private(session, filename, (const char *)passphrase, &pbEncoded, &cbEncoded, 1, 0); - if (ret) { + if(ret) { return -1; } @@ -1046,7 +1185,7 @@ _libssh2_wincng_rsa_new_private_frommemory(libssh2_rsa_ctx **rsa, ret = _libssh2_wincng_load_private_memory(session, filedata, filedata_len, (const char *)passphrase, &pbEncoded, &cbEncoded, 1, 0); - if (ret) { + if(ret) { return -1; } @@ -1090,7 +1229,7 @@ _libssh2_wincng_rsa_sha1_sign(LIBSSH2_SESSION *session, datalen = (unsigned long)hash_len; data = malloc(datalen); - if (!data) { + if(!data) { return -1; } @@ -1101,20 +1240,22 @@ _libssh2_wincng_rsa_sha1_sign(LIBSSH2_SESSION *session, ret = BCryptSignHash(rsa->hKey, &paddingInfo, data, datalen, NULL, 0, &cbData, BCRYPT_PAD_PKCS1); - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { siglen = cbData; sig = LIBSSH2_ALLOC(session, siglen); - if (sig) { + if(sig) { ret = BCryptSignHash(rsa->hKey, &paddingInfo, data, datalen, sig, siglen, &cbData, BCRYPT_PAD_PKCS1); - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { *signature_len = siglen; *signature = sig; - } else { + } + else { LIBSSH2_FREE(session, sig); } - } else + } + else ret = STATUS_NO_MEMORY; } @@ -1126,7 +1267,7 @@ _libssh2_wincng_rsa_sha1_sign(LIBSSH2_SESSION *session, void _libssh2_wincng_rsa_free(libssh2_rsa_ctx *rsa) { - if (!rsa) + if(!rsa) return; BCryptDestroyKey(rsa->hKey); @@ -1168,11 +1309,11 @@ _libssh2_wincng_dsa_new(libssh2_dsa_ctx **dsa, _libssh2_wincng_bn_size(ydata, ylen)); offset = sizeof(BCRYPT_DSA_KEY_BLOB); keylen = offset + length * 3; - if (xdata && xlen > 0) + if(xdata && xlen > 0) keylen += 20; key = malloc(keylen); - if (!key) { + if(!key) { return 
-1; } @@ -1186,39 +1327,40 @@ _libssh2_wincng_dsa_new(libssh2_dsa_ctx **dsa, memset(dsakey->Count, -1, sizeof(dsakey->Count)); memset(dsakey->Seed, -1, sizeof(dsakey->Seed)); - if (qlen < 20) + if(qlen < 20) memcpy(dsakey->q + 20 - qlen, qdata, qlen); else memcpy(dsakey->q, qdata + qlen - 20, 20); - if (plen < length) + if(plen < length) memcpy(key + offset + length - plen, pdata, plen); else memcpy(key + offset, pdata + plen - length, length); offset += length; - if (glen < length) + if(glen < length) memcpy(key + offset + length - glen, gdata, glen); else memcpy(key + offset, gdata + glen - length, length); offset += length; - if (ylen < length) + if(ylen < length) memcpy(key + offset + length - ylen, ydata, ylen); else memcpy(key + offset, ydata + ylen - length, length); - if (xdata && xlen > 0) { + if(xdata && xlen > 0) { offset += length; - if (xlen < 20) + if(xlen < 20) memcpy(key + offset + 20 - xlen, xdata, xlen); else memcpy(key + offset, xdata + xlen - 20, 20); lpszBlobType = BCRYPT_DSA_PRIVATE_BLOB; dsakey->dwMagic = BCRYPT_DSA_PRIVATE_MAGIC; - } else { + } + else { lpszBlobType = BCRYPT_DSA_PUBLIC_BLOB; dsakey->dwMagic = BCRYPT_DSA_PUBLIC_MAGIC; } @@ -1226,14 +1368,14 @@ _libssh2_wincng_dsa_new(libssh2_dsa_ctx **dsa, ret = BCryptImportKeyPair(_libssh2_wincng.hAlgDSA, NULL, lpszBlobType, &hKey, key, keylen, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { _libssh2_wincng_safe_free(key, keylen); return -1; } *dsa = malloc(sizeof(libssh2_dsa_ctx)); - if (!(*dsa)) { + if(!(*dsa)) { BCryptDestroyKey(hKey); _libssh2_wincng_safe_free(key, keylen); return -1; @@ -1264,23 +1406,24 @@ _libssh2_wincng_dsa_new_private_parse(libssh2_dsa_ctx **dsa, _libssh2_wincng_safe_free(pbEncoded, cbEncoded); - if (ret) { + if(ret) { return -1; } - if (length == 6) { + if(length == 6) { ret = _libssh2_wincng_dsa_new(dsa, rpbDecoded[1], rcbDecoded[1], rpbDecoded[2], rcbDecoded[2], rpbDecoded[3], rcbDecoded[3], rpbDecoded[4], rcbDecoded[4], rpbDecoded[5], 
rcbDecoded[5]); - } else { + } + else { ret = -1; } - for (index = 0; index < length; index++) { + for(index = 0; index < length; index++) { _libssh2_wincng_safe_free(rpbDecoded[index], rcbDecoded[index]); rpbDecoded[index] = NULL; rcbDecoded[index] = 0; @@ -1307,7 +1450,7 @@ _libssh2_wincng_dsa_new_private(libssh2_dsa_ctx **dsa, ret = _libssh2_wincng_load_private(session, filename, (const char *)passphrase, &pbEncoded, &cbEncoded, 0, 1); - if (ret) { + if(ret) { return -1; } @@ -1339,7 +1482,7 @@ _libssh2_wincng_dsa_new_private_frommemory(libssh2_dsa_ctx **dsa, ret = _libssh2_wincng_load_private_memory(session, filedata, filedata_len, (const char *)passphrase, &pbEncoded, &cbEncoded, 0, 1); - if (ret) { + if(ret) { return -1; } @@ -1378,7 +1521,7 @@ _libssh2_wincng_dsa_sha1_sign(libssh2_dsa_ctx *dsa, datalen = hash_len; data = malloc(datalen); - if (!data) { + if(!data) { return -1; } @@ -1386,21 +1529,23 @@ _libssh2_wincng_dsa_sha1_sign(libssh2_dsa_ctx *dsa, ret = BCryptSignHash(dsa->hKey, NULL, data, datalen, NULL, 0, &cbData, 0); - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { siglen = cbData; - if (siglen == 40) { + if(siglen == 40) { sig = malloc(siglen); - if (sig) { + if(sig) { ret = BCryptSignHash(dsa->hKey, NULL, data, datalen, sig, siglen, &cbData, 0); - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { memcpy(sig_fixed, sig, siglen); } _libssh2_wincng_safe_free(sig, siglen); - } else + } + else ret = STATUS_NO_MEMORY; - } else + } + else ret = STATUS_NO_MEMORY; } @@ -1412,7 +1557,7 @@ _libssh2_wincng_dsa_sha1_sign(libssh2_dsa_ctx *dsa, void _libssh2_wincng_dsa_free(libssh2_dsa_ctx *dsa) { - if (!dsa) + if(!dsa) return; BCryptDestroyKey(dsa->hKey); @@ -1466,24 +1611,25 @@ _libssh2_wincng_pub_priv_keyfile_parse(LIBSSH2_SESSION *session, _libssh2_wincng_safe_free(pbEncoded, cbEncoded); - if (ret) { + if(ret) { return -1; } - if (length == 9) { /* private RSA key */ + if(length == 9) { /* private RSA key */ mthlen = 7; mth = 
LIBSSH2_ALLOC(session, mthlen); - if (mth) { + if(mth) { memcpy(mth, "ssh-rsa", mthlen); - } else { + } + else { ret = -1; } keylen = 4 + mthlen + 4 + rcbDecoded[2] + 4 + rcbDecoded[1]; key = LIBSSH2_ALLOC(session, keylen); - if (key) { + if(key) { offset = _libssh2_wincng_pub_priv_write(key, 0, mth, mthlen); offset = _libssh2_wincng_pub_priv_write(key, offset, @@ -1493,23 +1639,26 @@ _libssh2_wincng_pub_priv_keyfile_parse(LIBSSH2_SESSION *session, _libssh2_wincng_pub_priv_write(key, offset, rpbDecoded[1], rcbDecoded[1]); - } else { + } + else { ret = -1; } - } else if (length == 6) { /* private DSA key */ + } + else if(length == 6) { /* private DSA key */ mthlen = 7; mth = LIBSSH2_ALLOC(session, mthlen); - if (mth) { + if(mth) { memcpy(mth, "ssh-dss", mthlen); - } else { + } + else { ret = -1; } keylen = 4 + mthlen + 4 + rcbDecoded[1] + 4 + rcbDecoded[2] + 4 + rcbDecoded[3] + 4 + rcbDecoded[4]; key = LIBSSH2_ALLOC(session, keylen); - if (key) { + if(key) { offset = _libssh2_wincng_pub_priv_write(key, 0, mth, mthlen); offset = _libssh2_wincng_pub_priv_write(key, offset, @@ -1527,16 +1676,18 @@ _libssh2_wincng_pub_priv_keyfile_parse(LIBSSH2_SESSION *session, _libssh2_wincng_pub_priv_write(key, offset, rpbDecoded[4], rcbDecoded[4]); - } else { + } + else { ret = -1; } - } else { + } + else { ret = -1; } - for (index = 0; index < length; index++) { + for(index = 0; index < length; index++) { _libssh2_wincng_safe_free(rpbDecoded[index], rcbDecoded[index]); rpbDecoded[index] = NULL; rcbDecoded[index] = 0; @@ -1546,12 +1697,13 @@ _libssh2_wincng_pub_priv_keyfile_parse(LIBSSH2_SESSION *session, free(rcbDecoded); - if (ret) { - if (mth) + if(ret) { + if(mth) LIBSSH2_FREE(session, mth); - if (key) + if(key) LIBSSH2_FREE(session, key); - } else { + } + else { *method = mth; *method_len = mthlen; *pubkeydata = key; @@ -1578,7 +1730,7 @@ _libssh2_wincng_pub_priv_keyfile(LIBSSH2_SESSION *session, ret = _libssh2_wincng_load_private(session, privatekey, passphrase, &pbEncoded, 
&cbEncoded, 1, 1); - if (ret) { + if(ret) { return -1; } @@ -1617,7 +1769,7 @@ _libssh2_wincng_pub_priv_keyfilememory(LIBSSH2_SESSION *session, ret = _libssh2_wincng_load_private_memory(session, privatekeydata, privatekeydata_len, passphrase, &pbEncoded, &cbEncoded, 1, 1); - if (ret) { + if(ret) { return -1; } @@ -1634,8 +1786,8 @@ _libssh2_wincng_pub_priv_keyfilememory(LIBSSH2_SESSION *session, (void)passphrase; return _libssh2_error(session, LIBSSH2_ERROR_METHOD_NOT_SUPPORTED, - "Unable to extract public key from private key in memory: " - "Method unsupported in Windows CNG backend"); + "Unable to extract public key from private key in memory: " + "Method unsupported in Windows CNG backend"); #endif /* HAVE_LIBCRYPT32 */ } @@ -1653,8 +1805,9 @@ _libssh2_wincng_cipher_init(_libssh2_cipher_ctx *ctx, { BCRYPT_KEY_HANDLE hKey; BCRYPT_KEY_DATA_BLOB_HEADER *header; - unsigned char *pbKeyObject, *pbIV, *key; - unsigned long dwKeyObject, dwIV, dwBlockLength, cbData, keylen; + unsigned char *pbKeyObject, *pbIV, *key, *pbCtr, *pbIVCopy; + unsigned long dwKeyObject, dwIV, dwCtrLength, dwBlockLength, + cbData, keylen; int ret; (void)encrypt; @@ -1663,7 +1816,7 @@ _libssh2_wincng_cipher_init(_libssh2_cipher_ctx *ctx, (unsigned char *)&dwKeyObject, sizeof(dwKeyObject), &cbData, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { return -1; } @@ -1671,19 +1824,19 @@ _libssh2_wincng_cipher_init(_libssh2_cipher_ctx *ctx, (unsigned char *)&dwBlockLength, sizeof(dwBlockLength), &cbData, 0); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { return -1; } pbKeyObject = malloc(dwKeyObject); - if (!pbKeyObject) { + if(!pbKeyObject) { return -1; } keylen = sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + type.dwKeyLength; key = malloc(keylen); - if (!key) { + if(!key) { free(pbKeyObject); return -1; } @@ -1702,36 +1855,46 @@ _libssh2_wincng_cipher_init(_libssh2_cipher_ctx *ctx, _libssh2_wincng_safe_free(key, keylen); - if (!BCRYPT_SUCCESS(ret)) { + if(!BCRYPT_SUCCESS(ret)) { 
_libssh2_wincng_safe_free(pbKeyObject, dwKeyObject); return -1; } - if (type.dwUseIV) { - pbIV = malloc(dwBlockLength); - if (!pbIV) { + pbIV = NULL; + pbCtr = NULL; + dwIV = 0; + dwCtrLength = 0; + + if(type.useIV || type.ctrMode) { + pbIVCopy = malloc(dwBlockLength); + if(!pbIVCopy) { BCryptDestroyKey(hKey); _libssh2_wincng_safe_free(pbKeyObject, dwKeyObject); return -1; } - dwIV = dwBlockLength; - memcpy(pbIV, iv, dwIV); - } else { - pbIV = NULL; - dwIV = 0; - } + memcpy(pbIVCopy, iv, dwBlockLength); + if(type.ctrMode) { + pbCtr = pbIVCopy; + dwCtrLength = dwBlockLength; + } + else if(type.useIV) { + pbIV = pbIVCopy; + dwIV = dwBlockLength; + } + } ctx->hKey = hKey; ctx->pbKeyObject = pbKeyObject; ctx->pbIV = pbIV; + ctx->pbCtr = pbCtr; ctx->dwKeyObject = dwKeyObject; ctx->dwIV = dwIV; ctx->dwBlockLength = dwBlockLength; + ctx->dwCtrLength = dwCtrLength; return 0; } - int _libssh2_wincng_cipher_crypt(_libssh2_cipher_ctx *ctx, _libssh2_cipher_type(type), @@ -1739,7 +1902,7 @@ _libssh2_wincng_cipher_crypt(_libssh2_cipher_ctx *ctx, unsigned char *block, size_t blocklen) { - unsigned char *pbOutput; + unsigned char *pbOutput, *pbInput; unsigned long cbOutput, cbInput; int ret; @@ -1747,31 +1910,47 @@ _libssh2_wincng_cipher_crypt(_libssh2_cipher_ctx *ctx, cbInput = (unsigned long)blocklen; - if (encrypt) { - ret = BCryptEncrypt(ctx->hKey, block, cbInput, NULL, + if(type.ctrMode) { + pbInput = ctx->pbCtr; + } + else { + pbInput = block; + } + + if(encrypt || type.ctrMode) { + ret = BCryptEncrypt(ctx->hKey, pbInput, cbInput, NULL, ctx->pbIV, ctx->dwIV, NULL, 0, &cbOutput, 0); - } else { - ret = BCryptDecrypt(ctx->hKey, block, cbInput, NULL, + } + else { + ret = BCryptDecrypt(ctx->hKey, pbInput, cbInput, NULL, ctx->pbIV, ctx->dwIV, NULL, 0, &cbOutput, 0); } - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { pbOutput = malloc(cbOutput); - if (pbOutput) { - if (encrypt) { - ret = BCryptEncrypt(ctx->hKey, block, cbInput, NULL, + if(pbOutput) { + if(encrypt || 
type.ctrMode) { + ret = BCryptEncrypt(ctx->hKey, pbInput, cbInput, NULL, ctx->pbIV, ctx->dwIV, pbOutput, cbOutput, &cbOutput, 0); - } else { - ret = BCryptDecrypt(ctx->hKey, block, cbInput, NULL, + } + else { + ret = BCryptDecrypt(ctx->hKey, pbInput, cbInput, NULL, ctx->pbIV, ctx->dwIV, pbOutput, cbOutput, &cbOutput, 0); } - if (BCRYPT_SUCCESS(ret)) { - memcpy(block, pbOutput, cbOutput); + if(BCRYPT_SUCCESS(ret)) { + if(type.ctrMode) { + _libssh2_xor_data(block, block, pbOutput, blocklen); + _libssh2_aes_ctr_increment(ctx->pbCtr, ctx->dwCtrLength); + } + else { + memcpy(block, pbOutput, cbOutput); + } } _libssh2_wincng_safe_free(pbOutput, cbOutput); - } else + } + else ret = STATUS_NO_MEMORY; } @@ -1791,6 +1970,10 @@ _libssh2_wincng_cipher_dtor(_libssh2_cipher_ctx *ctx) _libssh2_wincng_safe_free(ctx->pbIV, ctx->dwBlockLength); ctx->pbIV = NULL; ctx->dwBlockLength = 0; + + _libssh2_wincng_safe_free(ctx->pbCtr, ctx->dwCtrLength); + ctx->pbCtr = NULL; + ctx->dwCtrLength = 0; } @@ -1805,7 +1988,7 @@ _libssh2_wincng_bignum_init(void) _libssh2_bn *bignum; bignum = (_libssh2_bn *)malloc(sizeof(_libssh2_bn)); - if (bignum) { + if(bignum) { bignum->bignum = NULL; bignum->length = 0; } @@ -1818,20 +2001,20 @@ _libssh2_wincng_bignum_resize(_libssh2_bn *bn, unsigned long length) { unsigned char *bignum; - if (!bn) + if(!bn) return -1; - if (length == bn->length) + if(length == bn->length) return 0; #ifdef LIBSSH2_CLEAR_MEMORY - if (bn->bignum && bn->length > 0 && length < bn->length) { + if(bn->bignum && bn->length > 0 && length < bn->length) { SecureZeroMemory(bn->bignum + length, bn->length - length); } #endif bignum = realloc(bn->bignum, length); - if (!bignum) + if(!bignum) return -1; bn->bignum = bignum; @@ -1840,49 +2023,51 @@ _libssh2_wincng_bignum_resize(_libssh2_bn *bn, unsigned long length) return 0; } -int +static int _libssh2_wincng_bignum_rand(_libssh2_bn *rnd, int bits, int top, int bottom) { unsigned char *bignum; unsigned long length; - if (!rnd) + if(!rnd) 
return -1; - length = (unsigned long)(ceil((float)bits / 8) * sizeof(unsigned char)); - if (_libssh2_wincng_bignum_resize(rnd, length)) + length = (unsigned long) (ceil(((double)bits) / 8.0) * + sizeof(unsigned char)); + if(_libssh2_wincng_bignum_resize(rnd, length)) return -1; bignum = rnd->bignum; - if (_libssh2_wincng_random(bignum, length)) + if(_libssh2_wincng_random(bignum, length)) return -1; /* calculate significant bits in most significant byte */ bits %= 8; + if(bits == 0) + bits = 8; /* fill most significant byte with zero padding */ - bignum[0] &= (1 << (8 - bits)) - 1; + bignum[0] &= ((1 << bits) - 1); - /* set some special last bits in most significant byte */ - if (top == 0) - bignum[0] |= (1 << (7 - bits)); - else if (top == 1) - bignum[0] |= (3 << (6 - bits)); + /* set most significant bits in most significant byte */ + if(top == 0) + bignum[0] |= (1 << (bits - 1)); + else if(top == 1) + bignum[0] |= (3 << (bits - 2)); /* make odd by setting first bit in least significant byte */ - if (bottom) + if(bottom) bignum[length - 1] |= 1; return 0; } -int +static int _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r, _libssh2_bn *a, _libssh2_bn *p, - _libssh2_bn *m, - _libssh2_bn_ctx *bnctx) + _libssh2_bn *m) { BCRYPT_KEY_HANDLE hKey; BCRYPT_RSAKEY_BLOB *rsakey; @@ -1890,16 +2075,14 @@ _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r, unsigned long keylen, offset, length; int ret; - (void)bnctx; - - if (!r || !a || !p || !m) + if(!r || !a || !p || !m) return -1; offset = sizeof(BCRYPT_RSAKEY_BLOB); keylen = offset + p->length + m->length; key = malloc(keylen); - if (!key) + if(!key) return -1; @@ -1916,22 +2099,20 @@ _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r, offset += p->length; memcpy(key + offset, m->bignum, m->length); + offset = 0; ret = BCryptImportKeyPair(_libssh2_wincng.hAlgRSA, NULL, - BCRYPT_RSAPUBLIC_BLOB, &hKey, key, keylen, - BCRYPT_NO_KEY_VALIDATION); - - if (BCRYPT_SUCCESS(ret)) { + BCRYPT_RSAPUBLIC_BLOB, &hKey, key, keylen, 0); + 
if(BCRYPT_SUCCESS(ret)) { ret = BCryptEncrypt(hKey, a->bignum, a->length, NULL, NULL, 0, NULL, 0, &length, BCRYPT_PAD_NONE); - if (BCRYPT_SUCCESS(ret)) { - if (!_libssh2_wincng_bignum_resize(r, length)) { + if(BCRYPT_SUCCESS(ret)) { + if(!_libssh2_wincng_bignum_resize(r, length)) { length = max(a->length, length); bignum = malloc(length); - if (bignum) { - offset = length - a->length; - memset(bignum, 0, offset); - memcpy(bignum + offset, a->bignum, a->length); + if(bignum) { + memcpy_with_be_padding(bignum, length, + a->bignum, a->length); ret = BCryptEncrypt(hKey, bignum, length, NULL, NULL, 0, r->bignum, r->length, &offset, @@ -1939,12 +2120,14 @@ _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r, _libssh2_wincng_safe_free(bignum, length); - if (BCRYPT_SUCCESS(ret)) { + if(BCRYPT_SUCCESS(ret)) { _libssh2_wincng_bignum_resize(r, offset); } - } else + } + else ret = STATUS_NO_MEMORY; - } else + } + else ret = STATUS_NO_MEMORY; } @@ -1961,20 +2144,21 @@ _libssh2_wincng_bignum_set_word(_libssh2_bn *bn, unsigned long word) { unsigned long offset, number, bits, length; - if (!bn) + if(!bn) return -1; bits = 0; number = word; - while (number >>= 1) + while(number >>= 1) bits++; + bits++; - length = (unsigned long) (ceil(((double)(bits + 1)) / 8.0) * + length = (unsigned long) (ceil(((double)bits) / 8.0) * sizeof(unsigned char)); - if (_libssh2_wincng_bignum_resize(bn, length)) + if(_libssh2_wincng_bignum_resize(bn, length)) return -1; - for (offset = 0; offset < length; offset++) + for(offset = 0; offset < length; offset++) bn->bignum[offset] = (word >> (offset * 8)) & 0xff; return 0; @@ -1986,21 +2170,18 @@ _libssh2_wincng_bignum_bits(const _libssh2_bn *bn) unsigned char number; unsigned long offset, length, bits; - if (!bn) + if(!bn || !bn->bignum || !bn->length) return 0; - length = bn->length - 1; - offset = 0; - while (!(*(bn->bignum + offset)) && (offset < length)) + length = bn->length - 1; + while(!bn->bignum[offset] && offset < length) offset++; bits = (length - 
offset) * 8; number = bn->bignum[offset]; - - while (number >>= 1) + while(number >>= 1) bits++; - bits++; return bits; @@ -2013,10 +2194,10 @@ _libssh2_wincng_bignum_from_bin(_libssh2_bn *bn, unsigned long len, unsigned char *bignum; unsigned long offset, length, bits; - if (!bn || !bin || !len) + if(!bn || !bin || !len) return; - if (_libssh2_wincng_bignum_resize(bn, len)) + if(_libssh2_wincng_bignum_resize(bn, len)) return; memcpy(bn->bignum, bin, len); @@ -2026,7 +2207,7 @@ _libssh2_wincng_bignum_from_bin(_libssh2_bn *bn, unsigned long len, sizeof(unsigned char)); offset = bn->length - length; - if (offset > 0) { + if(offset > 0) { memmove(bn->bignum, bn->bignum + offset, length); #ifdef LIBSSH2_CLEAR_MEMORY @@ -2034,7 +2215,7 @@ _libssh2_wincng_bignum_from_bin(_libssh2_bn *bn, unsigned long len, #endif bignum = realloc(bn->bignum, length); - if (bignum) { + if(bignum) { bn->bignum = bignum; bn->length = length; } @@ -2044,7 +2225,7 @@ _libssh2_wincng_bignum_from_bin(_libssh2_bn *bn, unsigned long len, void _libssh2_wincng_bignum_to_bin(const _libssh2_bn *bn, unsigned char *bin) { - if (bin && bn && bn->bignum && bn->length > 0) { + if(bin && bn && bn->bignum && bn->length > 0) { memcpy(bin, bn->bignum, bn->length); } } @@ -2052,8 +2233,8 @@ _libssh2_wincng_bignum_to_bin(const _libssh2_bn *bn, unsigned char *bin) void _libssh2_wincng_bignum_free(_libssh2_bn *bn) { - if (bn) { - if (bn->bignum) { + if(bn) { + if(bn->bignum) { _libssh2_wincng_safe_free(bn->bignum, bn->length); bn->bignum = NULL; } @@ -2063,14 +2244,368 @@ _libssh2_wincng_bignum_free(_libssh2_bn *bn) } +/*******************************************************************/ /* - * Windows CNG backend: other functions + * Windows CNG backend: Diffie-Hellman support. 
*/ -void _libssh2_init_aes_ctr(void) +void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx) +{ + /* Random from client */ + dhctx->bn = NULL; + dhctx->dh_handle = NULL; + dhctx->dh_params = NULL; +} + +void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx) { - /* no implementation */ - (void)0; + if(dhctx->dh_handle) { + BCryptDestroyKey(dhctx->dh_handle); + dhctx->dh_handle = NULL; + } + if(dhctx->dh_params) { + /* Since public dh_params are shared in clear text, + * we don't need to securely zero them out here */ + free(dhctx->dh_params); + dhctx->dh_params = NULL; + } + if(dhctx->bn) { + _libssh2_wincng_bignum_free(dhctx->bn); + dhctx->bn = NULL; + } +} + +/* Generates a Diffie-Hellman key pair using base `g', prime `p' and the given + * `group_order'. Can use the given big number context `bnctx' if needed. The + * private key is stored as opaque in the Diffie-Hellman context `*dhctx' and + * the public key is returned in `public'. 0 is returned upon success, else + * -1. */ +int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order) +{ + const int hasAlgDHwithKDF = _libssh2_wincng.hasAlgDHwithKDF; + while(_libssh2_wincng.hAlgDH && hasAlgDHwithKDF != -1) { + BCRYPT_DH_PARAMETER_HEADER *dh_params = NULL; + unsigned long dh_params_len; + unsigned char *blob = NULL; + int status; + /* Note that the DH provider requires that keys be multiples of 64 bits + * in length. At the time of writing a practical observed group_order + * value is 257, so we need to round down to 8 bytes of length (64/8) + * in order for kex to succeed */ + DWORD key_length_bytes = max(round_down(group_order, 8), + max(g->length, p->length)); + BCRYPT_DH_KEY_BLOB *dh_key_blob; + LPCWSTR key_type; + + /* Prepare a key pair; pass the in the bit length of the key, + * but the key is not ready for consumption until it is finalized. 
*/ + status = BCryptGenerateKeyPair(_libssh2_wincng.hAlgDH, + &dhctx->dh_handle, + key_length_bytes * 8, 0); + if(!BCRYPT_SUCCESS(status)) { + return -1; + } + + dh_params_len = sizeof(*dh_params) + 2 * key_length_bytes; + blob = malloc(dh_params_len); + if(!blob) { + return -1; + } + + /* Populate DH parameters blob; after the header follows the `p` + * value and the `g` value. */ + dh_params = (BCRYPT_DH_PARAMETER_HEADER*)blob; + dh_params->cbLength = dh_params_len; + dh_params->dwMagic = BCRYPT_DH_PARAMETERS_MAGIC; + dh_params->cbKeyLength = key_length_bytes; + memcpy_with_be_padding(blob + sizeof(*dh_params), key_length_bytes, + p->bignum, p->length); + memcpy_with_be_padding(blob + sizeof(*dh_params) + key_length_bytes, + key_length_bytes, g->bignum, g->length); + + status = BCryptSetProperty(dhctx->dh_handle, BCRYPT_DH_PARAMETERS, + blob, dh_params_len, 0); + if(hasAlgDHwithKDF == -1) { + /* We know that the raw KDF is not supported, so discard this. */ + free(blob); + } + else { + /* Pass ownership to dhctx; these parameters will be freed when + * the context is destroyed. We need to keep the parameters more + * easily available so that we have access to the `g` value when + * _libssh2_dh_secret is called later. */ + dhctx->dh_params = dh_params; + } + dh_params = NULL; + blob = NULL; + + if(!BCRYPT_SUCCESS(status)) { + return -1; + } + + status = BCryptFinalizeKeyPair(dhctx->dh_handle, 0); + if(!BCRYPT_SUCCESS(status)) { + return -1; + } + + key_length_bytes = 0; + if(hasAlgDHwithKDF == 1) { + /* Now we need to extract the public portion of the key so that we + * set it in the `public` bignum to satisfy our caller. + * First measure up the size of the required buffer. */ + key_type = BCRYPT_DH_PUBLIC_BLOB; + } + else { + /* We also need to extract the private portion of the key to + * set it in the `*dhctx' bignum if the raw KDF is not supported. + * First measure up the size of the required buffer. 
*/ + key_type = BCRYPT_DH_PRIVATE_BLOB; + } + status = BCryptExportKey(dhctx->dh_handle, NULL, key_type, + NULL, 0, &key_length_bytes, 0); + if(!BCRYPT_SUCCESS(status)) { + return -1; + } + + blob = malloc(key_length_bytes); + if(!blob) { + return -1; + } + + status = BCryptExportKey(dhctx->dh_handle, NULL, key_type, + blob, key_length_bytes, + &key_length_bytes, 0); + if(!BCRYPT_SUCCESS(status)) { + if(hasAlgDHwithKDF == 1) { + /* We have no private data, because raw KDF is supported */ + free(blob); + } + else { /* we may have potentially private data, use secure free */ + _libssh2_wincng_safe_free(blob, key_length_bytes); + } + return -1; + } + + if(hasAlgDHwithKDF == -1) { + /* We know that the raw KDF is not supported, so discard this */ + BCryptDestroyKey(dhctx->dh_handle); + dhctx->dh_handle = NULL; + } + + /* BCRYPT_DH_PUBLIC_BLOB corresponds to a BCRYPT_DH_KEY_BLOB header + * followed by the Modulus, Generator and Public data. Those components + * each have equal size, specified by dh_key_blob->cbKey. 
*/ + dh_key_blob = (BCRYPT_DH_KEY_BLOB*)blob; + if(_libssh2_wincng_bignum_resize(public, dh_key_blob->cbKey)) { + if(hasAlgDHwithKDF == 1) { + /* We have no private data, because raw KDF is supported */ + free(blob); + } + else { /* we may have potentially private data, use secure free */ + _libssh2_wincng_safe_free(blob, key_length_bytes); + } + return -1; + } + + /* Copy the public key data into the public bignum data buffer */ + memcpy(public->bignum, + blob + sizeof(*dh_key_blob) + 2 * dh_key_blob->cbKey, + dh_key_blob->cbKey); + + if(dh_key_blob->dwMagic == BCRYPT_DH_PRIVATE_MAGIC) { + /* BCRYPT_DH_PRIVATE_BLOB additionally contains the Private data */ + dhctx->bn = _libssh2_wincng_bignum_init(); + if(!dhctx->bn) { + _libssh2_wincng_safe_free(blob, key_length_bytes); + return -1; + } + if(_libssh2_wincng_bignum_resize(dhctx->bn, dh_key_blob->cbKey)) { + _libssh2_wincng_safe_free(blob, key_length_bytes); + return -1; + } + + /* Copy the private key data into the dhctx bignum data buffer */ + memcpy(dhctx->bn->bignum, + blob + sizeof(*dh_key_blob) + 3 * dh_key_blob->cbKey, + dh_key_blob->cbKey); + + /* Make sure the private key is an odd number, because only + * odd primes can be used with the RSA-based fallback while + * DH itself does not seem to care about it being odd or not. 
*/ + if(!(dhctx->bn->bignum[dhctx->bn->length-1] % 2)) { + _libssh2_wincng_safe_free(blob, key_length_bytes); + /* discard everything first, then try again */ + _libssh2_dh_dtor(dhctx); + _libssh2_dh_init(dhctx); + continue; + } + } + + return 0; + } + + /* Generate x and e */ + dhctx->bn = _libssh2_wincng_bignum_init(); + if(!dhctx->bn) + return -1; + if(_libssh2_wincng_bignum_rand(dhctx->bn, group_order * 8 - 1, 0, -1)) + return -1; + if(_libssh2_wincng_bignum_mod_exp(public, g, dhctx->bn, p)) + return -1; + + return 0; +} + +/* Computes the Diffie-Hellman secret from the previously created context + * `*dhctx', the public key `f' from the other party and the same prime `p' + * used at context creation. The result is stored in `secret'. 0 is returned + * upon success, else -1. */ +int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p) +{ + if(_libssh2_wincng.hAlgDH && _libssh2_wincng.hasAlgDHwithKDF != -1 && + dhctx->dh_handle && dhctx->dh_params && f) { + BCRYPT_KEY_HANDLE peer_public = NULL; + BCRYPT_SECRET_HANDLE agreement = NULL; + ULONG secret_len_bytes = 0; + unsigned char *blob; + int status; + unsigned char *start, *end; + BCRYPT_DH_KEY_BLOB *public_blob = NULL; + DWORD key_length_bytes = max(f->length, dhctx->dh_params->cbKeyLength); + DWORD public_blob_len = sizeof(*public_blob) + 3 * key_length_bytes; + + { + /* Populate a BCRYPT_DH_KEY_BLOB; after the header follows the + * Modulus, Generator and Public data. Those components must have + * equal size in this representation. 
*/ + unsigned char *dest; + unsigned char *src; + + blob = malloc(public_blob_len); + if(!blob) { + return -1; + } + public_blob = (BCRYPT_DH_KEY_BLOB*)blob; + public_blob->dwMagic = BCRYPT_DH_PUBLIC_MAGIC; + public_blob->cbKey = key_length_bytes; + + dest = (unsigned char *)(public_blob + 1); + src = (unsigned char *)(dhctx->dh_params + 1); + + /* Modulus (the p-value from the first call) */ + memcpy_with_be_padding(dest, key_length_bytes, src, + dhctx->dh_params->cbKeyLength); + /* Generator (the g-value from the first call) */ + memcpy_with_be_padding(dest + key_length_bytes, key_length_bytes, + src + dhctx->dh_params->cbKeyLength, + dhctx->dh_params->cbKeyLength); + /* Public from the peer */ + memcpy_with_be_padding(dest + 2*key_length_bytes, key_length_bytes, + f->bignum, f->length); + } + + /* Import the peer public key information */ + status = BCryptImportKeyPair(_libssh2_wincng.hAlgDH, NULL, + BCRYPT_DH_PUBLIC_BLOB, &peer_public, blob, + public_blob_len, 0); + if(!BCRYPT_SUCCESS(status)) { + goto out; + } + + /* Set up a handle that we can use to establish the shared secret + * between ourselves (our saved dh_handle) and the peer. */ + status = BCryptSecretAgreement(dhctx->dh_handle, peer_public, + &agreement, 0); + if(!BCRYPT_SUCCESS(status)) { + goto out; + } + + /* Compute the size of the buffer that is needed to hold the derived + * shared secret. 
*/ + status = BCryptDeriveKey(agreement, BCRYPT_KDF_RAW_SECRET, NULL, NULL, + 0, &secret_len_bytes, 0); + if(!BCRYPT_SUCCESS(status)) { + if(status == STATUS_NOT_SUPPORTED) { + _libssh2_wincng.hasAlgDHwithKDF = -1; + } + goto out; + } + + /* Expand the secret bignum to be ready to receive the derived secret + * */ + if(_libssh2_wincng_bignum_resize(secret, secret_len_bytes)) { + status = STATUS_NO_MEMORY; + goto out; + } + + /* And populate the secret bignum */ + status = BCryptDeriveKey(agreement, BCRYPT_KDF_RAW_SECRET, NULL, + secret->bignum, secret_len_bytes, + &secret_len_bytes, 0); + if(!BCRYPT_SUCCESS(status)) { + if(status == STATUS_NOT_SUPPORTED) { + _libssh2_wincng.hasAlgDHwithKDF = -1; + } + goto out; + } + + /* Counter to all the other data in the BCrypt APIs, the raw secret is + * returned to us in host byte order, so we need to swap it to big + * endian order. */ + start = secret->bignum; + end = secret->bignum + secret->length - 1; + while(start < end) { + unsigned char tmp = *end; + *end = *start; + *start = tmp; + start++; + end--; + } + + status = 0; + _libssh2_wincng.hasAlgDHwithKDF = 1; + +out: + if(peer_public) { + BCryptDestroyKey(peer_public); + } + if(agreement) { + BCryptDestroySecret(agreement); + } + if(status == STATUS_NOT_SUPPORTED && + _libssh2_wincng.hasAlgDHwithKDF == -1) { + goto fb; /* fallback to RSA-based implementation */ + } + return BCRYPT_SUCCESS(status) ? 
0 : -1; + } + +fb: + /* Compute the shared secret */ + return _libssh2_wincng_bignum_mod_exp(secret, f, dhctx->bn, p); +} + +/* _libssh2_supported_key_sign_algorithms + * + * Return supported key hash algo upgrades, see crypto.h + * + */ + +const char * +_libssh2_supported_key_sign_algorithms(LIBSSH2_SESSION *session, + unsigned char *key_method, + size_t key_method_len) +{ + (void)session; + (void)key_method; + (void)key_method_len; + + return NULL; } #endif /* LIBSSH2_WINCNG */ diff --git a/vendor/libssh2/src/wincng.h b/vendor/libssh2/src/wincng.h index 5219db7f8a..538cc4314f 100755 --- a/vendor/libssh2/src/wincng.h +++ b/vendor/libssh2/src/wincng.h @@ -1,5 +1,7 @@ +#ifndef __LIBSSH2_WINCNG_H +#define __LIBSSH2_WINCNG_H /* - * Copyright (C) 2013-2015 Marc Hoersken + * Copyright (C) 2013-2020 Marc Hoersken * All rights reserved. * * Redistribution and use in source and binary forms, @@ -47,7 +49,6 @@ #include #include - #define LIBSSH2_MD5 1 #define LIBSSH2_HMAC_RIPEMD 0 @@ -55,20 +56,30 @@ #define LIBSSH2_HMAC_SHA512 1 #define LIBSSH2_AES 1 -#define LIBSSH2_AES_CTR 0 +#define LIBSSH2_AES_CTR 1 #define LIBSSH2_BLOWFISH 0 #define LIBSSH2_RC4 1 #define LIBSSH2_CAST 0 #define LIBSSH2_3DES 1 #define LIBSSH2_RSA 1 +#define LIBSSH2_RSA_SHA2 0 #define LIBSSH2_DSA 1 +#define LIBSSH2_ECDSA 0 +#define LIBSSH2_ED25519 0 #define MD5_DIGEST_LENGTH 16 #define SHA_DIGEST_LENGTH 20 #define SHA256_DIGEST_LENGTH 32 +#define SHA384_DIGEST_LENGTH 48 #define SHA512_DIGEST_LENGTH 64 +#define EC_MAX_POINT_LEN ((528 * 2 / 8) + 1) + +#if LIBSSH2_ECDSA +#else +#define _libssh2_ec_key void +#endif /*******************************************************************/ /* @@ -80,19 +91,24 @@ struct _libssh2_wincng_ctx { BCRYPT_ALG_HANDLE hAlgHashMD5; BCRYPT_ALG_HANDLE hAlgHashSHA1; BCRYPT_ALG_HANDLE hAlgHashSHA256; + BCRYPT_ALG_HANDLE hAlgHashSHA384; BCRYPT_ALG_HANDLE hAlgHashSHA512; BCRYPT_ALG_HANDLE hAlgHmacMD5; BCRYPT_ALG_HANDLE hAlgHmacSHA1; BCRYPT_ALG_HANDLE hAlgHmacSHA256; + 
BCRYPT_ALG_HANDLE hAlgHmacSHA384; BCRYPT_ALG_HANDLE hAlgHmacSHA512; BCRYPT_ALG_HANDLE hAlgRSA; BCRYPT_ALG_HANDLE hAlgDSA; BCRYPT_ALG_HANDLE hAlgAES_CBC; + BCRYPT_ALG_HANDLE hAlgAES_ECB; BCRYPT_ALG_HANDLE hAlgRC4_NA; BCRYPT_ALG_HANDLE hAlg3DES_CBC; + BCRYPT_ALG_HANDLE hAlgDH; + volatile int hasAlgDHwithKDF; /* -1=no, 0=maybe, 1=yes */ }; -struct _libssh2_wincng_ctx _libssh2_wincng; +extern struct _libssh2_wincng_ctx _libssh2_wincng; /*******************************************************************/ @@ -153,7 +169,17 @@ typedef struct __libssh2_wincng_hash_ctx { #define libssh2_sha256(data, datalen, hash) \ _libssh2_wincng_hash(data, datalen, _libssh2_wincng.hAlgHashSHA256, \ hash, SHA256_DIGEST_LENGTH) - +#define libssh2_sha384_ctx _libssh2_wincng_hash_ctx +#define libssh2_sha384_init(ctx) \ + (_libssh2_wincng_hash_init(ctx, _libssh2_wincng.hAlgHashSHA384, \ + SHA384_DIGEST_LENGTH, NULL, 0) == 0) +#define libssh2_sha384_update(ctx, data, datalen) \ + _libssh2_wincng_hash_update(&ctx, (unsigned char *) data, datalen) +#define libssh2_sha384_final(ctx, hash) \ + _libssh2_wincng_hash_final(&ctx, hash) +#define libssh2_sha384(data, datalen, hash) \ +_libssh2_wincng_hash(data, datalen, _libssh2_wincng.hAlgHashSHA384, \ + hash, SHA384_DIGEST_LENGTH) #define libssh2_sha512_ctx _libssh2_wincng_hash_ctx #define libssh2_sha512_init(ctx) \ (_libssh2_wincng_hash_init(ctx, _libssh2_wincng.hAlgHashSHA512, \ @@ -285,9 +311,11 @@ struct _libssh2_wincng_cipher_ctx { BCRYPT_KEY_HANDLE hKey; unsigned char *pbKeyObject; unsigned char *pbIV; + unsigned char *pbCtr; unsigned long dwKeyObject; unsigned long dwIV; unsigned long dwBlockLength; + unsigned long dwCtrLength; }; #define _libssh2_cipher_ctx struct _libssh2_wincng_cipher_ctx @@ -299,21 +327,21 @@ struct _libssh2_wincng_cipher_ctx { struct _libssh2_wincng_cipher_type { BCRYPT_ALG_HANDLE *phAlg; unsigned long dwKeyLength; - unsigned long dwUseIV; + int useIV; /* TODO: Convert to bool when a C89 compatible bool type + is defined 
*/ + int ctrMode; }; #define _libssh2_cipher_type(type) struct _libssh2_wincng_cipher_type type -#define _libssh2_cipher_aes256ctr { NULL, 32, 1 } /* not supported */ -#define _libssh2_cipher_aes192ctr { NULL, 24, 1 } /* not supported */ -#define _libssh2_cipher_aes128ctr { NULL, 16, 1 } /* not supported */ -#define _libssh2_cipher_aes256 { &_libssh2_wincng.hAlgAES_CBC, 32, 1 } -#define _libssh2_cipher_aes192 { &_libssh2_wincng.hAlgAES_CBC, 24, 1 } -#define _libssh2_cipher_aes128 { &_libssh2_wincng.hAlgAES_CBC, 16, 1 } -#define _libssh2_cipher_blowfish { NULL, 16, 0 } /* not supported */ -#define _libssh2_cipher_arcfour { &_libssh2_wincng.hAlgRC4_NA, 16, 0 } -#define _libssh2_cipher_cast5 { NULL, 16, 0 } /* not supported */ -#define _libssh2_cipher_3des { &_libssh2_wincng.hAlg3DES_CBC, 24, 1 } +#define _libssh2_cipher_aes256ctr { &_libssh2_wincng.hAlgAES_ECB, 32, 0, 1 } +#define _libssh2_cipher_aes192ctr { &_libssh2_wincng.hAlgAES_ECB, 24, 0, 1 } +#define _libssh2_cipher_aes128ctr { &_libssh2_wincng.hAlgAES_ECB, 16, 0, 1 } +#define _libssh2_cipher_aes256 { &_libssh2_wincng.hAlgAES_CBC, 32, 1, 0 } +#define _libssh2_cipher_aes192 { &_libssh2_wincng.hAlgAES_CBC, 24, 1, 0 } +#define _libssh2_cipher_aes128 { &_libssh2_wincng.hAlgAES_CBC, 16, 1, 0 } +#define _libssh2_cipher_arcfour { &_libssh2_wincng.hAlgRC4_NA, 16, 0, 0 } +#define _libssh2_cipher_3des { &_libssh2_wincng.hAlg3DES_CBC, 24, 1, 0 } /* * Windows CNG backend: Cipher functions @@ -358,10 +386,6 @@ _libssh2_bn *_libssh2_wincng_bignum_init(void); _libssh2_wincng_bignum_init() #define _libssh2_bn_init_from_bin() \ _libssh2_bn_init() -#define _libssh2_bn_rand(bn, bits, top, bottom) \ - _libssh2_wincng_bignum_rand(bn, bits, top, bottom) -#define _libssh2_bn_mod_exp(r, a, p, m, ctx) \ - _libssh2_wincng_bignum_mod_exp(r, a, p, m, ctx) #define _libssh2_bn_set_word(bn, word) \ _libssh2_wincng_bignum_set_word(bn, word) #define _libssh2_bn_from_bin(bn, len, bin) \ @@ -374,6 +398,28 @@ _libssh2_bn 
*_libssh2_wincng_bignum_init(void); #define _libssh2_bn_free(bn) \ _libssh2_wincng_bignum_free(bn) +/* + * Windows CNG backend: Diffie-Hellman support + */ + +typedef struct { + /* holds our private and public key components */ + BCRYPT_KEY_HANDLE dh_handle; + /* records the parsed out modulus and generator + * parameters that are shared with the peer */ + BCRYPT_DH_PARAMETER_HEADER *dh_params; + /* records the parsed out private key component for + * fallback if the DH API raw KDF is not supported */ + struct _libssh2_wincng_bignum *bn; +} _libssh2_dh_ctx; + +#define libssh2_dh_init(dhctx) _libssh2_dh_init(dhctx) +#define libssh2_dh_key_pair(dhctx, public, g, p, group_order, bnctx) \ + _libssh2_dh_key_pair(dhctx, public, g, p, group_order) +#define libssh2_dh_secret(dhctx, secret, f, p, bnctx) \ + _libssh2_dh_secret(dhctx, secret, f, p) +#define libssh2_dh_dtor(dhctx) _libssh2_dh_dtor(dhctx) + /*******************************************************************/ /* * Windows CNG backend: forward declarations @@ -381,7 +427,6 @@ _libssh2_bn *_libssh2_wincng_bignum_init(void); void _libssh2_wincng_init(void); void _libssh2_wincng_free(void); int _libssh2_wincng_random(void *buf, int len); -void _libssh2_init_aes_ctr(void); int _libssh2_wincng_hash_init(_libssh2_wincng_hash_ctx *ctx, @@ -531,14 +576,6 @@ _libssh2_wincng_cipher_dtor(_libssh2_cipher_ctx *ctx); _libssh2_bn * _libssh2_wincng_bignum_init(void); int -_libssh2_wincng_bignum_rand(_libssh2_bn *rnd, int bits, int top, int bottom); -int -_libssh2_wincng_bignum_mod_exp(_libssh2_bn *r, - _libssh2_bn *a, - _libssh2_bn *p, - _libssh2_bn *m, - _libssh2_bn_ctx *bnctx); -int _libssh2_wincng_bignum_set_word(_libssh2_bn *bn, unsigned long word); unsigned long _libssh2_wincng_bignum_bits(const _libssh2_bn *bn); @@ -549,3 +586,15 @@ void _libssh2_wincng_bignum_to_bin(const _libssh2_bn *bn, unsigned char *bin); void _libssh2_wincng_bignum_free(_libssh2_bn *bn); +extern void +_libssh2_dh_init(_libssh2_dh_ctx *dhctx); 
+extern int +_libssh2_dh_key_pair(_libssh2_dh_ctx *dhctx, _libssh2_bn *public, + _libssh2_bn *g, _libssh2_bn *p, int group_order); +extern int +_libssh2_dh_secret(_libssh2_dh_ctx *dhctx, _libssh2_bn *secret, + _libssh2_bn *f, _libssh2_bn *p); +extern void +_libssh2_dh_dtor(_libssh2_dh_ctx *dhctx); + +#endif /* __LIBSSH2_WINCNG_H */ diff --git a/vendor/libssh2/test-driver b/vendor/libssh2/test-driver index 32bf39e837..9759384aa7 100755 --- a/vendor/libssh2/test-driver +++ b/vendor/libssh2/test-driver @@ -1,9 +1,9 @@ #! /bin/sh # test-driver - basic testsuite driver script. -scriptversion=2012-06-27.10; # UTC +scriptversion=2018-03-07.03; # UTC -# Copyright (C) 2011-2013 Free Software Foundation, Inc. +# Copyright (C) 2011-2020 Free Software Foundation, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -16,7 +16,7 @@ scriptversion=2012-06-27.10; # UTC # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -42,15 +42,16 @@ print_usage () { cat <$log_file 2>&1 estatus=$? 
+ if test $enable_hard_errors = no && test $estatus -eq 99; then - estatus=1 + tweaked_estatus=1 +else + tweaked_estatus=$estatus fi -case $estatus:$expect_failure in +case $tweaked_estatus:$expect_failure in 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; 0:*) col=$grn res=PASS recheck=no gcopy=no;; 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; @@ -107,6 +124,12 @@ case $estatus:$expect_failure in *:*) col=$red res=FAIL recheck=yes gcopy=yes;; esac +# Report the test outcome and exit status in the logs, so that one can +# know whether the test passed or failed simply by looking at the '.log' +# file, without the need of also peaking into the corresponding '.trs' +# file (automake bug#11814). +echo "$res $test_name (exit status: $estatus)" >>$log_file + # Report outcome to console. echo "${col}${res}${std}: $test_name" @@ -119,9 +142,9 @@ echo ":copy-in-global-log: $gcopy" >> $trs_file # Local Variables: # mode: shell-script # sh-indentation: 2 -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" +# time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: diff --git a/vendor/libssh2/tests/CMakeLists.txt b/vendor/libssh2/tests/CMakeLists.txt index bd0f903e28..cf4b3f768a 100644 --- a/vendor/libssh2/tests/CMakeLists.txt +++ b/vendor/libssh2/tests/CMakeLists.txt @@ -43,15 +43,70 @@ include(SocketLibraries) ## Platform checks check_include_files(inttypes.h HAVE_INTTYPES_H) check_include_files(unistd.h HAVE_UNISTD_H) +check_include_files(sys/param.h HAVE_SYS_PARAM_H) check_include_files(sys/socket.h HAVE_SYS_SOCKET_H) check_include_files(arpa/inet.h HAVE_ARPA_INET_H) check_include_files(windows.h HAVE_WINDOWS_H) check_include_files(winsock2.h HAVE_WINSOCK2_H) +check_include_files(netinet/in.h HAVE_NETINET_IN_H) configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/libssh2_config_cmake.h.in" 
"${CMAKE_CURRENT_BINARY_DIR}/libssh2_config.h") append_needed_socket_libraries(LIBRARIES) +## Cryptography backend choice + +set(CRYPTO_BACKEND + "" + CACHE + STRING + "The backend to use for cryptography: OpenSSL, Libgcrypt or WinCNG, mbedTLS +or empty to try any available") + +# If the crypto backend was given, rather than searching for the first +# we are able to find, the find_package commands must abort configuration +# and report to the user. +if(CRYPTO_BACKEND) + set(SPECIFIC_CRYPTO_REQUIREMENT REQUIRED) +endif() + +if(CRYPTO_BACKEND STREQUAL "OpenSSL" OR NOT CRYPTO_BACKEND) + + find_package(OpenSSL ${SPECIFIC_CRYPTO_REQUIREMENT}) + + if(OPENSSL_FOUND) + set(CRYPTO_BACKEND "OpenSSL") + endif() +endif() + +if(CRYPTO_BACKEND STREQUAL "Libgcrypt" OR NOT CRYPTO_BACKEND) + + find_package(Libgcrypt ${SPECIFIC_CRYPTO_REQUIREMENT}) + + if(LIBGCRYPT_FOUND) + set(CRYPTO_BACKEND "Libgcrypt") + endif() +endif() + +if(CRYPTO_BACKEND STREQUAL "WinCNG" OR NOT CRYPTO_BACKEND) + + # The check actually compiles the header. This requires windows.h. 
+ check_include_files("windows.h;bcrypt.h" HAVE_BCRYPT_H) + + if(HAVE_BCRYPT_H) + set(CRYPTO_BACKEND "WinCNG") + endif() +endif() + +if(CRYPTO_BACKEND STREQUAL "mbedTLS" OR NOT CRYPTO_BACKEND) + + find_package(mbedTLS ${SPECIFIC_CRYPTO_REQUIREMENT}) + + if(MBEDTLS_FOUND) + set(CRYPTO_BACKEND "mbedTLS") + endif() +endif() + set(TESTS hostkey hostkey_hash @@ -60,11 +115,33 @@ set(TESTS password_auth_fails_with_wrong_username public_key_auth_fails_with_wrong_key public_key_auth_succeeds_with_correct_rsa_key - public_key_auth_succeeds_with_correct_dsa_key + public_key_auth_succeeds_with_correct_encrypted_rsa_key keyboard_interactive_auth_fails_with_wrong_response keyboard_interactive_auth_succeeds_with_correct_response + agent_forward_succeeds ) +if(CRYPTO_BACKEND STREQUAL "OpenSSL") + list(APPEND TESTS + public_key_auth_succeeds_with_correct_rsa_openssh_key + ) + if(OPENSSL_VERSION VERSION_GREATER "1.1.0") + list(APPEND TESTS + public_key_auth_succeeds_with_correct_ed25519_key + public_key_auth_succeeds_with_correct_encrypted_ed25519_key + public_key_auth_succeeds_with_correct_ed25519_key_from_mem + public_key_auth_succeeds_with_correct_ecdsa_key + public_key_auth_succeeds_with_correct_signed_ecdsa_key + ) + endif() +endif() + +if(NOT CRYPTO_BACKEND STREQUAL "mbedTLS") + list(APPEND TESTS + public_key_auth_succeeds_with_correct_dsa_key + ) +endif() + add_library(openssh_fixture STATIC openssh_fixture.h openssh_fixture.c) target_link_libraries(openssh_fixture ${LIBRARIES}) target_include_directories(openssh_fixture PRIVATE "${CMAKE_CURRENT_BINARY_DIR}") @@ -82,6 +159,7 @@ foreach(test ${TESTS}) target_link_libraries(test_${test} libssh2 runner ${LIBRARIES}) target_include_directories(test_${test} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}") list(APPEND TEST_TARGETS test_${test}) + add_definitions(-DFIXTURE_WORKDIR="${CMAKE_CURRENT_SOURCE_DIR}") add_test( NAME test_${test} COMMAND $ diff --git a/vendor/libssh2/tests/Makefile.am b/vendor/libssh2/tests/Makefile.am index 
3c3745c680..27ddc2dfa7 100644 --- a/vendor/libssh2/tests/Makefile.am +++ b/vendor/libssh2/tests/Makefile.am @@ -1,3 +1,5 @@ +SUBDIRS = ossfuzz + AM_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include -I$(top_builddir)/src LDADD = ../src/libssh2.la @@ -16,18 +18,61 @@ check_PROGRAMS = $(ctests) TESTS_ENVIRONMENT = SSHD=$(SSHD) EXEEXT=$(EXEEXT) TESTS_ENVIRONMENT += srcdir=$(top_srcdir)/tests builddir=$(top_builddir)/tests -EXTRA_DIST = ssh2.sh mansyntax.sh -EXTRA_DIST += etc/host etc/host.pub etc/user etc/user.pub -EXTRA_DIST += CMakeLists.txt libssh2_config_cmake.h.in sshd_fixture.sh.in -EXTRA_DIST += key_dsa key_dsa.pub key_dsa_wrong key_dsa_wrong.pub key_rsa key_rsa.pub -EXTRA_DIST += openssh_server/authorized_keys openssh_server/Dockerfile openssh_server/ssh_host_rsa_key -EXTRA_DIST += openssh_fixture.c openssh_fixture.h runner.c session_fixture.c session_fixture.h -EXTRA_DIST += test_hostkey.c test_hostkey_hash.c -EXTRA_DIST += test_keyboard_interactive_auth_fails_with_wrong_response.c -EXTRA_DIST += test_keyboard_interactive_auth_succeeds_with_correct_response.c -EXTRA_DIST += test_password_auth_fails_with_wrong_password.c -EXTRA_DIST += test_password_auth_fails_with_wrong_username.c -EXTRA_DIST += test_password_auth_succeeds_with_correct_credentials.c -EXTRA_DIST += test_public_key_auth_fails_with_wrong_key.c -EXTRA_DIST += test_public_key_auth_succeeds_with_correct_dsa_key.c -EXTRA_DIST += test_public_key_auth_succeeds_with_correct_rsa_key.c +EXTRA_DIST = \ + CMakeLists.txt \ + etc/host \ + etc/host.pub \ + etc/user \ + etc/user.pub \ + key_dsa \ + key_dsa.pub \ + key_dsa_wrong \ + key_dsa_wrong.pub \ + key_ecdsa \ + key_ecdsa.pub \ + key_ed25519 \ + key_ed25519.pub \ + key_ed25519_encrypted \ + key_ed25519_encrypted.pub \ + key_rsa \ + key_rsa.pub \ + key_rsa_encrypted \ + key_rsa_encrypted.pub \ + key_rsa_openssh \ + key_rsa_openssh.pub \ + libssh2_config_cmake.h.in \ + mansyntax.sh \ + openssh_fixture.c \ + openssh_fixture.h \ + 
openssh_server/Dockerfile \ + openssh_server/authorized_keys \ + openssh_server/ca_ecdsa \ + openssh_server/ca_ecdsa.pub \ + openssh_server/ssh_host_ecdsa_key \ + openssh_server/ssh_host_ed25519_key \ + openssh_server/ssh_host_rsa_key \ + runner.c \ + session_fixture.c \ + session_fixture.h \ + simple.c \ + ssh2.c \ + ssh2.sh \ + sshd_fixture.sh.in \ + test_agent_forward_succeeds.c \ + test_hostkey.c \ + test_hostkey_hash.c \ + test_keyboard_interactive_auth_fails_with_wrong_response.c \ + test_keyboard_interactive_auth_succeeds_with_correct_response.c \ + test_password_auth_fails_with_wrong_password.c \ + test_password_auth_fails_with_wrong_username.c \ + test_password_auth_succeeds_with_correct_credentials.c \ + test_public_key_auth_fails_with_wrong_key.c \ + test_public_key_auth_succeeds_with_correct_dsa_key.c \ + test_public_key_auth_succeeds_with_correct_ed25519_key.c \ + test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c \ + test_public_key_auth_succeeds_with_correct_ecdsa_key.c \ + test_public_key_auth_succeeds_with_correct_signed_ecdsa_key.c \ + test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c \ + test_public_key_auth_succeeds_with_correct_encrypted_rsa_key.c \ + test_public_key_auth_succeeds_with_correct_rsa_key.c \ + test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c diff --git a/vendor/libssh2/tests/Makefile.in b/vendor/libssh2/tests/Makefile.in index b3e7d461b1..e5eb881acb 100644 --- a/vendor/libssh2/tests/Makefile.in +++ b/vendor/libssh2/tests/Makefile.in @@ -1,7 +1,7 @@ -# Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.16.4 from Makefile.am. # @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2021 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -102,8 +102,7 @@ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d -CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h \ - $(top_builddir)/example/libssh2_config.h +CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) @@ -132,9 +131,10 @@ AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = -DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/src -I$(top_builddir)/example +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/src depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/simple.Po ./$(DEPDIR)/ssh2.Po am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) @@ -156,11 +156,27 @@ am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = simple.c $(ssh2_SOURCES) DIST_SOURCES = simple.c $(am__ssh2_SOURCES_DIST) +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ 
+ $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + check recheck distdir distdir-am am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is @@ -178,8 +194,6 @@ am__define_uniq_tagged_files = \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` -ETAGS = etags -CTAGS = ctags am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no @@ -362,8 +376,8 @@ am__set_TESTS_bases = \ bases='$(TEST_LOGS)'; \ bases=`for i in $$bases; do echo $$i; done | sed 's/\.log$$//'`; \ bases=`echo $$bases` +AM_TESTSUITE_SUMMARY_HEADER = ' for $(PACKAGE_STRING)' RECHECK_LOGS = $(TEST_LOGS) -AM_RECURSIVE_TARGETS = check recheck TEST_SUITE_LOG = test-suite.log TEST_EXTENSIONS = @EXEEXT@ .test LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver @@ -384,9 +398,35 @@ TEST_LOGS = $(am__test_logs2:.test.log=.log) TEST_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver TEST_LOG_COMPILE = $(TEST_LOG_COMPILER) $(AM_TEST_LOG_FLAGS) \ $(TEST_LOG_FLAGS) +DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp \ $(top_srcdir)/test-driver DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed 
-e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ @@ -402,6 +442,12 @@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ @@ -412,13 +458,14 @@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ +ETAGS = @ETAGS@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ -HAVE_LIBMBEDTLS = @HAVE_LIBMBEDTLS@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ HAVE_LIBSSL = @HAVE_LIBSSL@ HAVE_LIBZ = @HAVE_LIBZ@ INSTALL = @INSTALL@ @@ -434,8 +481,8 @@ LIBCRYPT32 = @LIBCRYPT32@ LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ LIBGCRYPT = @LIBGCRYPT@ LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ -LIBMBEDTLS = @LIBMBEDTLS@ -LIBMBEDTLS_PREFIX = @LIBMBEDTLS_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSREQUIRED = @LIBSREQUIRED@ @@ -445,12 +492,13 @@ LIBSSL_PREFIX = @LIBSSL_PREFIX@ LIBTOOL = @LIBTOOL@ LIBZ = @LIBZ@ LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = @LIB_FUZZING_ENGINE@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBBCRYPT = @LTLIBBCRYPT@ LTLIBCRYPT32 = @LTLIBCRYPT32@ LTLIBGCRYPT = @LTLIBGCRYPT@ -LTLIBMBEDTLS = @LTLIBMBEDTLS@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ LTLIBOBJS = @LTLIBOBJS@ LTLIBSSL = @LTLIBSSL@ LTLIBZ = @LTLIBZ@ @@ -486,6 +534,7 @@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ @@ -533,6 +582,7 @@ target_alias = @target_alias@ 
top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ +SUBDIRS = ossfuzz AM_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include -I$(top_builddir)/src LDADD = ../src/libssh2.la @SSHD_TRUE@ssh2_SOURCES = ssh2.c @@ -541,23 +591,66 @@ TESTS = $(ctests) mansyntax.sh $(am__append_1) check_PROGRAMS = $(ctests) TESTS_ENVIRONMENT = SSHD=$(SSHD) EXEEXT=$(EXEEXT) \ srcdir=$(top_srcdir)/tests builddir=$(top_builddir)/tests -EXTRA_DIST = ssh2.sh mansyntax.sh etc/host etc/host.pub etc/user \ - etc/user.pub CMakeLists.txt libssh2_config_cmake.h.in \ - sshd_fixture.sh.in key_dsa key_dsa.pub key_dsa_wrong \ - key_dsa_wrong.pub key_rsa key_rsa.pub \ - openssh_server/authorized_keys openssh_server/Dockerfile \ - openssh_server/ssh_host_rsa_key openssh_fixture.c \ - openssh_fixture.h runner.c session_fixture.c session_fixture.h \ - test_hostkey.c test_hostkey_hash.c \ - test_keyboard_interactive_auth_fails_with_wrong_response.c \ - test_keyboard_interactive_auth_succeeds_with_correct_response.c \ - test_password_auth_fails_with_wrong_password.c \ - test_password_auth_fails_with_wrong_username.c \ - test_password_auth_succeeds_with_correct_credentials.c \ - test_public_key_auth_fails_with_wrong_key.c \ - test_public_key_auth_succeeds_with_correct_dsa_key.c \ - test_public_key_auth_succeeds_with_correct_rsa_key.c -all: all-am +EXTRA_DIST = \ + CMakeLists.txt \ + etc/host \ + etc/host.pub \ + etc/user \ + etc/user.pub \ + key_dsa \ + key_dsa.pub \ + key_dsa_wrong \ + key_dsa_wrong.pub \ + key_ecdsa \ + key_ecdsa.pub \ + key_ed25519 \ + key_ed25519.pub \ + key_ed25519_encrypted \ + key_ed25519_encrypted.pub \ + key_rsa \ + key_rsa.pub \ + key_rsa_encrypted \ + key_rsa_encrypted.pub \ + key_rsa_openssh \ + key_rsa_openssh.pub \ + libssh2_config_cmake.h.in \ + mansyntax.sh \ + openssh_fixture.c \ + openssh_fixture.h \ + openssh_server/Dockerfile \ + openssh_server/authorized_keys \ + openssh_server/ca_ecdsa \ + openssh_server/ca_ecdsa.pub \ + 
openssh_server/ssh_host_ecdsa_key \ + openssh_server/ssh_host_ed25519_key \ + openssh_server/ssh_host_rsa_key \ + runner.c \ + session_fixture.c \ + session_fixture.h \ + simple.c \ + ssh2.c \ + ssh2.sh \ + sshd_fixture.sh.in \ + test_agent_forward_succeeds.c \ + test_hostkey.c \ + test_hostkey_hash.c \ + test_keyboard_interactive_auth_fails_with_wrong_response.c \ + test_keyboard_interactive_auth_succeeds_with_correct_response.c \ + test_password_auth_fails_with_wrong_password.c \ + test_password_auth_fails_with_wrong_username.c \ + test_password_auth_succeeds_with_correct_credentials.c \ + test_public_key_auth_fails_with_wrong_key.c \ + test_public_key_auth_succeeds_with_correct_dsa_key.c \ + test_public_key_auth_succeeds_with_correct_ed25519_key.c \ + test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c \ + test_public_key_auth_succeeds_with_correct_ecdsa_key.c \ + test_public_key_auth_succeeds_with_correct_signed_ecdsa_key.c \ + test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c \ + test_public_key_auth_succeeds_with_correct_encrypted_rsa_key.c \ + test_public_key_auth_succeeds_with_correct_rsa_key.c \ + test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c + +all: all-recursive .SUFFIXES: .SUFFIXES: .c .lo .log .o .obj .test .test$(EXEEXT) .trs @@ -578,8 +671,8 @@ Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) @@ -623,8 +716,14 @@ mostlyclean-compile: 
distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssh2.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -653,14 +752,61 @@ mostlyclean-libtool: clean-libtool: -rm -rf .libs _libs +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique -tags: tags-am +tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ @@ -673,7 +819,7 @@ tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $$unique; \ fi; \ fi -ctags: ctags-am +ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) @@ -686,7 +832,7 @@ GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" -cscopelist: cscopelist-am +cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ @@ -812,7 +958,7 @@ $(TEST_SUITE_LOG): $(TEST_LOGS) test x"$$VERBOSE" = x || cat $(TEST_SUITE_LOG); \ fi; \ echo "$${col}$$br$${std}"; \ - echo "$${col}Testsuite summary for $(PACKAGE_STRING)$${std}"; \ + echo "$${col}Testsuite summary"$(AM_TESTSUITE_SUMMARY_HEADER)"$${std}"; \ echo "$${col}$$br$${std}"; \ create_testsuite_report --maybe-color; \ echo "$$col$$br$$std"; \ @@ -825,7 +971,7 @@ $(TEST_SUITE_LOG): $(TEST_LOGS) fi; \ $$success || exit 1 -check-TESTS: +check-TESTS: $(check_PROGRAMS) @list='$(RECHECK_LOGS)'; test -z "$$list" || rm -f $$list @list='$(RECHECK_LOGS:.log=.trs)'; test -z "$$list" || rm -f $$list @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) @@ -881,8 +1027,10 @@ ssh2.sh.log: ssh2.sh @am__EXEEXT_TRUE@ --log-file $$b.log --trs-file $$b.trs \ @am__EXEEXT_TRUE@ $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ @am__EXEEXT_TRUE@ "$$tst" $(AM_TESTS_FD_REDIRECT) +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am -distdir: $(DISTFILES) +distdir-am: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ @@ -912,21 +1060,47 @@ distdir: $(DISTFILES) || exit 1; \ fi; \ done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + 
if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: check-am +check: check-recursive all-am: Makefile $(PROGRAMS) -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am -installcheck: installcheck-am +installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ @@ -951,80 +1125,83 @@ distclean-generic: maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am +clean: clean-recursive clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS mostlyclean-am -distclean: distclean-am - -rm -rf ./$(DEPDIR) +distclean: distclean-recursive + -rm -f ./$(DEPDIR)/simple.Po + -rm -f ./$(DEPDIR)/ssh2.Po -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags -dvi: dvi-am +dvi: dvi-recursive dvi-am: -html: html-am +html: html-recursive html-am: -info: info-am +info: info-recursive info-am: install-data-am: -install-dvi: install-dvi-am +install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: -install-html: install-html-am +install-html: install-html-recursive install-html-am: -install-info: install-info-am +install-info: install-info-recursive install-info-am: install-man: -install-pdf: install-pdf-am +install-pdf: install-pdf-recursive install-pdf-am: -install-ps: install-ps-am +install-ps: install-ps-recursive install-ps-am: installcheck-am: -maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) +maintainer-clean: maintainer-clean-recursive + -rm -f ./$(DEPDIR)/simple.Po + -rm -f ./$(DEPDIR)/ssh2.Po -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic -mostlyclean: mostlyclean-am +mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool -pdf: pdf-am +pdf: pdf-recursive pdf-am: -ps: ps-am +ps: ps-recursive ps-am: uninstall-am: -.MAKE: check-am install-am install-strip +.MAKE: $(am__recursive_targets) check-am install-am install-strip -.PHONY: CTAGS GTAGS TAGS all all-am check check-TESTS check-am clean \ +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--depfiles check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool \ clean-noinstPROGRAMS cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ @@ -1034,10 +1211,10 @@ uninstall-am: install-html-am install-info install-info-am 
install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - pdf pdf-am ps ps-am recheck tags tags-am uninstall \ - uninstall-am + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am recheck tags tags-am \ + uninstall uninstall-am .PRECIOUS: Makefile diff --git a/vendor/libssh2/tests/key_ecdsa b/vendor/libssh2/tests/key_ecdsa new file mode 100644 index 0000000000..6ed60773fb --- /dev/null +++ b/vendor/libssh2/tests/key_ecdsa @@ -0,0 +1,10 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAiAAAABNlY2RzYS +1zaGEyLW5pc3RwMzg0AAAACG5pc3RwMzg0AAAAYQTosiScH/oRSazpIpPSEFcY4YVZyNby +peARi49N3qy78OE118KGc5T8eifd+n1PSb7z8PnfDwOL4jBHxW5nWx0RCocIt7tb2a349J +gfEl8PegHGcF/DwC+eesIKJvv0MfkAAADIKLgw6yi4MOsAAAATZWNkc2Etc2hhMi1uaXN0 +cDM4NAAAAAhuaXN0cDM4NAAAAGEE6LIknB/6EUms6SKT0hBXGOGFWcjW8qXgEYuPTd6su/ +DhNdfChnOU/Hon3fp9T0m+8/D53w8Di+IwR8VuZ1sdEQqHCLe7W9mt+PSYHxJfD3oBxnBf +w8AvnnrCCib79DH5AAAAMGYdHu+u2/L8zC/0S9bao9y6vKiLSuTEfZpCIsyE5jWj/vrS0n +r1lzv9kKj+5A86aQAAAAA= +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/key_ecdsa.pub b/vendor/libssh2/tests/key_ecdsa.pub new file mode 100644 index 0000000000..597f63fcd4 --- /dev/null +++ b/vendor/libssh2/tests/key_ecdsa.pub @@ -0,0 +1 @@ +ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBOiyJJwf+hFJrOkik9IQVxjhhVnI1vKl4BGLj03erLvw4TXXwoZzlPx6J936fU9JvvPw+d8PA4viMEfFbmdbHREKhwi3u1vZrfj0mB8SXw96AcZwX8PAL556wgom+/Qx+Q== diff --git a/vendor/libssh2/tests/key_ed25519 b/vendor/libssh2/tests/key_ed25519 new file mode 100644 index 0000000000..bfb1ad566d --- /dev/null +++ b/vendor/libssh2/tests/key_ed25519 @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- 
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACCMbXcoNmUVxO9FMMj1VB91MnwwVfBl+XDxet+j+oY6JgAAAJg8nvUxPJ71 +MQAAAAtzc2gtZWQyNTUxOQAAACCMbXcoNmUVxO9FMMj1VB91MnwwVfBl+XDxet+j+oY6Jg +AAAECnhCuTDYdz3kUn48BXkaCXXdbKdH7wSIQ/CUx1cbnR0Ixtdyg2ZRXE70UwyPVUH3Uy +fDBV8GX5cPF636P6hjomAAAAEHdpbGxAaUN1YmUubG9jYWwBAgMEBQ== +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/key_ed25519.pub b/vendor/libssh2/tests/key_ed25519.pub new file mode 100644 index 0000000000..cd592194b5 --- /dev/null +++ b/vendor/libssh2/tests/key_ed25519.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIxtdyg2ZRXE70UwyPVUH3UyfDBV8GX5cPF636P6hjom diff --git a/vendor/libssh2/tests/key_ed25519_encrypted b/vendor/libssh2/tests/key_ed25519_encrypted new file mode 100644 index 0000000000..109de44489 --- /dev/null +++ b/vendor/libssh2/tests/key_ed25519_encrypted @@ -0,0 +1,8 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jdHIAAAAGYmNyeXB0AAAAGAAAABD4qdu8J/ +EAqXFQERrvzMerAAAAEAAAAAEAAAAzAAAAC3NzaC1lZDI1NTE5AAAAICHxEyUTOVHXvdMF +ARedFQ+H9DW/n8Zy3daKKRqnTDMqAAAAoO05oxXUkLz8cMQcMeeRSc4UvsaWnCvfN4Qm15 +NaVwSjb/09AcGGVeF1xxwPEIjwsIRftAjjgLuauI6XpXzyeDOlr2HnwzgpZtmeaHzbB7lS +NjpSENP+fXipXinSfgZqGOItPbbismEVWX4sQn/Zla6/f/JAcDV60TK2ZhVEZ5072t0NcA +eZQeSzBnpoRhlB7IDO7/7pmu1kNysUzH94Bw8= +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/key_ed25519_encrypted.pub b/vendor/libssh2/tests/key_ed25519_encrypted.pub new file mode 100644 index 0000000000..bc331555ad --- /dev/null +++ b/vendor/libssh2/tests/key_ed25519_encrypted.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICHxEyUTOVHXvdMFARedFQ+H9DW/n8Zy3daKKRqnTDMq diff --git a/vendor/libssh2/tests/key_rsa_encrypted b/vendor/libssh2/tests/key_rsa_encrypted new file mode 100644 index 0000000000..2f5e057521 --- /dev/null +++ b/vendor/libssh2/tests/key_rsa_encrypted @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: 
AES-128-CBC,E030CC29AE4C20669EE41DF3DE4C0886 + +gLMMWawVUpqtAtpSVnr7HcH+P0gHKFU00hUhNY18TNJRfFIhPqc9R9xsLHgBac7k +vHtD2nzUuWLVMIKQoS3+1IF/KO6Xj1zSqnyk49hyKWxFiD3U1YdZAiZNgJ/s6olg +J/h4mNNsz8Nh6Swp3HwP2jtLHWHV8fOzsaE3dvnVYZ5gPnec7XAYcQycbOV0t2Wd +NGlP09ooAQRWWuf1TaBewjj7Jm40l8OQat5EKZKzydUZZQYAqfJQ7fIw7jI/lQYF +KJj9tq0ceFdxvk8LYMr5a+ixnDwirxgg4L0X3fjLocfLVt42qDKkfOGXGg9VI8CO +gjTu/MbodGCWKe/5eeCSSLrKo486S/5B6RzN0Ax4QBb1iYAN5IECsV91Ekk0socY +DBZmDMEGHppHJhNhbBzfMYeKssWbOQf9z0y+gDPesImV2gXNoMgTcmZrCLOJWj6L +ifQAtTKc8P5fV+TLhg3dKmxCt1UMlCHpWWl7tPTsO3WaaXi50f9ypRfpbpH0hket +SO//bZqF0lF+Ci8uC6ndXBniIfinFoMWPsY01bxyHvmEMwCTVApZEkrZtGlHnavy +d2oYJ0Oc+eeSjnx2BccQ9GdoI3292CVJVgPiibr42updecFwTry+j//IY8H3d62m +UYUXJQgIL04o2/1UIT0mPWwPigF3sJSZOwT5arn4MgUyBCC18p6OTmGmvIyrz3YB +imbhndok/30sMwtJocgXKTdyreMUp0s8Hpw/2z4LQ0qlOstKwa8KnDcFAqaSFNGD +8tGTobAcGRhqq93PRrn3aRibk1T3KDpMF/oqRaajgBmXFVYOk1yuLy6meB+wAJFD +VVIokZaygYs13SMX+hau0Gd6PmVh8QF0RmvG69ga7k4dfJMbe2uU59wf9uABmyvd ++Ju+uXGiF0wYfcyv6HJarPIqA3630TPKR/z8dDWC3rJ83xx7hIpoEAA+b7RA3Vtx +Gv0EoDK6zeq4UJK0tqxMZJuy+FHBDZhv+gAeB/PmIGPIbF+jV+flmCrUgHg2Ka8I +Iaap+lQKMj5lzOv/1bbcZ6cpj717MGvo3XOwOD4x5b3wRX6DWphB0+oRWiVU3Vrd +PiZ0gtlX31Rj+h+QB4DrMKZWMu++qEDF6NPLz2ktNcjOYBT29VVqX8ALKKFO+jOK +ZASnUXXE3XnwbccwU9VIQ+3mom3K+GjJRGxsWNZsrPy364eQHckomcOptgk5ldI1 +eF7t5w0xQ4hx6jrJBcKJL29SQAcmUO+vu+6Vg6synBpnlqM9mSe8Xlo3SQ9bROJ9 +1unhrml0Jr1tJZfbM/kX7xhFUVc2kQHqYz6pwYl3fYceHk5dVj9IWaJj82Sfi1QQ +il3DQb9t3y4oJcYQxR9OzyjiKPiCAkIDakYshZP/bb/ZfDy1szIIL0e3mKLUcdFc +3sqAvcBsPt+SngnTtodkAK1ddTuxjHUN3+XpIAUoNtqv02g47JCmvSQ0NGsPyXIz +2krWQoMlmYaG3N74ybMajTXW3Y8+wbe5moJ+Yt4bPUo61d8rMOVI/+3lU7YIyUj1 +TqbwgHCvZRvaiXJQbC9lP7mbQipQhtwQgGMw9TdQB/oHldmDNETF4eNX11LC73+G +-----END RSA PRIVATE KEY----- diff --git a/vendor/libssh2/tests/key_rsa_encrypted.pub b/vendor/libssh2/tests/key_rsa_encrypted.pub new file mode 100644 index 0000000000..5c041d8e0f --- /dev/null +++ b/vendor/libssh2/tests/key_rsa_encrypted.pub @@ -0,0 +1 @@ +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC92YlGoc4PJy6DzX916JJZhxkvmkWBLGJdWOL7R9B6iaYEKebBxzTE3P1RcnxnuI06kklVq/KcDP9sLlgawTZcDg7ifM7HncPOi18OON8vvVVzodikHzuupjhpI5YTT9wwV2fDVi2URsBjvX4AFiZ5WM3/NwqdKpYABzWieBikXGJ58Tsnw+zQw2qMmKKESBuzSN538loTAj5iEH/GAKYDbbH9t2a17qhNCNEw4vrtURT9JqwO1cOg7N1OKpmqCPEbK0wuSTljNC230VJ06X/8UqahWWSH6MreGy6gwpPi6i9wFiFLur301R0dTPiKVhz6bguhcC1EAlhSgjfelFJt awl03@bounty diff --git a/vendor/libssh2/tests/key_rsa_openssh b/vendor/libssh2/tests/key_rsa_openssh new file mode 100644 index 0000000000..3562a16366 --- /dev/null +++ b/vendor/libssh2/tests/key_rsa_openssh @@ -0,0 +1,27 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAQEA03tZTdi/8nrdDGtSc15EH97dX/qWrgC3nNhbBmcvOykSVtQDsXE0 +4nj45RcD9cn0itZVl0Prn9G+tYJCqqkedhQ5MtuByVrmJX15REDJ9nfzzQzVQw2zuE1ysj +ccVBxSeqmDDXeJFozh/uq5mKFirFlft5g0Wx2oG1TxGC/MHqfDk6ijqq7lS1T82cmGZAbZ ++FzhYulBPFioklXStQJtTuVMb5Q/ebd9nmHIesEPWs4DKo2urKXvy+VCYD/N0GRZJ1Qt8D +2VpI6qJlRapdBaWkHJRDcMmPzmTMa9HE/3+2wi+rOAP9V6W7BpgtMWpOP0xx2zp/tC3SHo +9pxlfCRaEQAAA8gL9Cg6C/QoOgAAAAdzc2gtcnNhAAABAQDTe1lN2L/yet0Ma1JzXkQf3t +1f+pauALec2FsGZy87KRJW1AOxcTTiePjlFwP1yfSK1lWXQ+uf0b61gkKqqR52FDky24HJ +WuYlfXlEQMn2d/PNDNVDDbO4TXKyNxxUHFJ6qYMNd4kWjOH+6rmYoWKsWV+3mDRbHagbVP +EYL8wep8OTqKOqruVLVPzZyYZkBtn4XOFi6UE8WKiSVdK1Am1O5UxvlD95t32eYch6wQ9a +zgMqja6spe/L5UJgP83QZFknVC3wPZWkjqomVFql0FpaQclENwyY/OZMxr0cT/f7bCL6s4 +A/1XpbsGmC0xak4/THHbOn+0LdIej2nGV8JFoRAAAAAwEAAQAAAQAykM27lVXf7oyoCYk/ +WIzFag5YgpxAop9Ee17YWxep95oQ9MSlSsIwXGh2rlgeDtnP0IvKjUzre8UztR+nmqRT62 +X5yQ5xTLC2yheSwEMKEYhTwPvE+qO8L5h7ED5Pxi3acmmJcMlwgOMQhqM14XCscPo39cae ++qpVTqwO8m7F7Tu/GCQWKTDE6FekoX13/bYbnsgd7FZGTyc37rQ2kuergYeIRewrdTD3JB +ne6LmRVbMEuGh9WbXfXFLr+5p79xgnTPs+whdoyQTY8+O3052D8yMV7UcU+T9A0zHFyU9E +VT/SvTgMTF7icThTtVR6Vn095ahe77wh363N0JEe1rwBAAAAgQCSqhkKVowJSPw+Ho6PNk +lKcXWCutA8sVg+x+MaIdnzTe9TbxItm/XW4zj1Ax1rJeEgAaCKQVwH5oJDeC3awNZZ5ZY9 
+GK6h4ueyolzVP5wwalR9HeY/S+wdRgaIvYmIpHewLAj/o5ykE2Ijzgf3+HdaNlRxwWXz1i +8ArMV1AwB8WwAAAIEA75OHcAo8RUM7EoU165FZp7nqBphKuGMb8Os/p2xbNC8MYz5CyDXy +fzYZC3i67uGXyTTVLtl54+kzuciuZ/qLHJT49JY/AtOm+rmpXKACNQIZeEnCML8AewLDEg +ugXuFCMIFR4/fupCjGv/tTVHvsh6LJ/td3+DQmisVG3uDnGDEAAACBAOH6xeQ5Z/VPFV1b ++ZxutTMjFghLce50L6fSHpBbIN00vS+9I4TmXYI1XFvaFjHShYUrFifWiMFGBNjuoqRY+c +9/8UDvptdiXLqqLkw3SNB/UqUQRtZkD384Eazxud+FMfMguFBrgmkWYwAh9EVAzXrbzxQd +U9To5SerEitsWsfhAAAAEHdpbGxAaUN1YmUubG9jYWwBAg== +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/key_rsa_openssh.pub b/vendor/libssh2/tests/key_rsa_openssh.pub new file mode 100644 index 0000000000..9fc4ac2742 --- /dev/null +++ b/vendor/libssh2/tests/key_rsa_openssh.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTe1lN2L/yet0Ma1JzXkQf3t1f+pauALec2FsGZy87KRJW1AOxcTTiePjlFwP1yfSK1lWXQ+uf0b61gkKqqR52FDky24HJWuYlfXlEQMn2d/PNDNVDDbO4TXKyNxxUHFJ6qYMNd4kWjOH+6rmYoWKsWV+3mDRbHagbVPEYL8wep8OTqKOqruVLVPzZyYZkBtn4XOFi6UE8WKiSVdK1Am1O5UxvlD95t32eYch6wQ9azgMqja6spe/L5UJgP83QZFknVC3wPZWkjqomVFql0FpaQclENwyY/OZMxr0cT/f7bCL6s4A/1XpbsGmC0xak4/THHbOn+0LdIej2nGV8JFoR will@iCube.local diff --git a/vendor/libssh2/tests/libssh2_config_cmake.h.in b/vendor/libssh2/tests/libssh2_config_cmake.h.in index 51461f2567..4df27ecdc0 100644 --- a/vendor/libssh2/tests/libssh2_config_cmake.h.in +++ b/vendor/libssh2/tests/libssh2_config_cmake.h.in @@ -37,6 +37,7 @@ /* Headers */ #cmakedefine HAVE_UNISTD_H #cmakedefine HAVE_INTTYPES_H +#cmakedefine HAVE_SYS_PARAM_H #cmakedefine HAVE_SYS_SOCKET_H #cmakedefine HAVE_ARPA_INET_H #cmakedefine HAVE_NETINET_IN_H diff --git a/vendor/libssh2/tests/openssh_fixture.c b/vendor/libssh2/tests/openssh_fixture.c index 185ef87f0b..0480c5d542 100644 --- a/vendor/libssh2/tests/openssh_fixture.c +++ b/vendor/libssh2/tests/openssh_fixture.c @@ -57,113 +57,183 @@ #include #include #include +#include -static int run_command(const char *command, char **output) +static int run_command_varg(char **output, const char 
*command, va_list args) { FILE *pipe; + char redirect_stderr[] = "%s 2>&1"; char command_buf[BUFSIZ]; + char buf[BUFSIZ]; int ret; - if (output) { + size_t buf_len; + + if(output) { *output = NULL; } - /* Rewrite the command to redirect stderr to stdout to we can output it */ - ret = snprintf(command_buf, sizeof(command_buf), "%s 2>&1", command); - if (ret < 0 || ret >= BUFSIZ) { + /* Format the command string */ + ret = vsnprintf(command_buf, sizeof(command_buf), command, args); + if(ret < 0 || ret >= BUFSIZ) { fprintf(stderr, "Unable to format command (%s)\n", command); return -1; } + /* Rewrite the command to redirect stderr to stdout to we can output it */ + if(strlen(command_buf) + strlen(redirect_stderr) >= sizeof(buf)) { + fprintf(stderr, "Unable to rewrite command (%s)\n", command); + return -1; + } + + ret = snprintf(buf, sizeof(buf), redirect_stderr, command_buf); + if(ret < 0 || ret >= BUFSIZ) { + fprintf(stderr, "Unable to rewrite command (%s)\n", command); + return -1; + } + fprintf(stdout, "Command: %s\n", command); #ifdef WIN32 - pipe = _popen(command_buf, "r"); + pipe = _popen(buf, "r"); #else - pipe = popen(command_buf, "r"); + pipe = popen(buf, "r"); #endif - if (pipe) { - char buf[BUFSIZ]; - char *p = buf; - while (fgets(p, sizeof(buf) - (p - buf), pipe) != NULL) - ; + if(!pipe) { + fprintf(stderr, "Unable to execute command '%s'\n", command); + return -1; + } + buf[0] = 0; + buf_len = 0; + while(buf_len < (sizeof(buf) - 1) && + fgets(&buf[buf_len], sizeof(buf) - buf_len, pipe) != NULL) { + buf_len = strlen(buf); + } #ifdef WIN32 - ret = _pclose(pipe); + ret = _pclose(pipe); #else - ret = pclose(pipe); + ret = pclose(pipe); #endif - if (ret == 0) { - if (output) { - /* command output may contain a trailing newline, so we trim - * whitespace here */ - size_t end = strlen(buf) - 1; - while (end > 0 && isspace(buf[end])) { - buf[end] = '\0'; - } - - *output = strdup(buf); - } - } - else { - fprintf(stderr, "Error running command '%s' (exit %d): 
%s\n", - command, ret, buf); - } - return ret; + if(ret != 0) { + fprintf(stderr, "Error running command '%s' (exit %d): %s\n", + command, ret, buf); } - else { - fprintf(stderr, "Unable to execute command '%s'\n", command); - return -1; + + if(output) { + /* command output may contain a trailing newline, so we trim + * whitespace here */ + size_t end = strlen(buf); + while(end > 0 && isspace(buf[end - 1])) { + buf[end - 1] = '\0'; + } + + *output = strdup(buf); } + return ret; } -static int build_openssh_server_docker_image() +static int run_command(char **output, const char *command, ...) { - return run_command("docker build -t libssh2/openssh_server openssh_server", - NULL); + va_list args; + int ret; + + va_start(args, command); + ret = run_command_varg(output, command, args); + va_end(args); + + return ret; } -static int start_openssh_server(char **container_id_out) +static int build_openssh_server_docker_image(void) { - return run_command("docker run --detach -P libssh2/openssh_server", - container_id_out); + return run_command(NULL, "docker build -t libssh2/openssh_server " + "openssh_server"); } -static int stop_openssh_server(char *container_id) +static const char *openssh_server_port(void) { - char command_buf[BUFSIZ]; - int rc = snprintf(command_buf, sizeof(command_buf), "docker stop %s", - container_id); - if (rc > -1 && rc < BUFSIZ) { - return run_command(command_buf, NULL); + return getenv("OPENSSH_SERVER_PORT"); +} + +static int start_openssh_server(char **container_id_out) +{ + const char *container_host_port = openssh_server_port(); + if(container_host_port != NULL) { + return run_command(container_id_out, + "docker run --rm -d -p %s:22 " + "libssh2/openssh_server", + container_host_port); } else { - return rc; + return run_command(container_id_out, + "docker run --rm -d -p 22 " + "libssh2/openssh_server"); } } -static const char *docker_machine_name() +static int stop_openssh_server(char *container_id) +{ + return run_command(NULL, "docker stop 
%s", container_id); +} + +static const char *docker_machine_name(void) { return getenv("DOCKER_MACHINE_NAME"); } +static int is_running_inside_a_container() +{ +#ifdef WIN32 + return 0; +#else + const char *cgroup_filename = "/proc/self/cgroup"; + FILE *f = NULL; + char *line = NULL; + size_t len = 0; + ssize_t read = 0; + int found = 0; + f = fopen(cgroup_filename, "r"); + if(f == NULL) { + /* Don't go further, we are not in a container */ + return 0; + } + while((read = getline(&line, &len, f)) != -1) { + if(strstr(line, "docker") != NULL) { + found = 1; + break; + } + } + fclose(f); + free(line); + return found; +#endif +} + +static unsigned int portable_sleep(unsigned int seconds) +{ +#ifdef WIN32 + Sleep(seconds); +#else + sleep(seconds); +#endif +} + static int ip_address_from_container(char *container_id, char **ip_address_out) { const char *active_docker_machine = docker_machine_name(); - if (active_docker_machine != NULL) { + if(active_docker_machine != NULL) { - // This can be flaky when tests run in parallel (see - // https://github.com/docker/machine/issues/2612), so we retry a few - // times with exponential backoff if it fails + /* This can be flaky when tests run in parallel (see + https://github.com/docker/machine/issues/2612), so we retry a few + times with exponential backoff if it fails */ int attempt_no = 0; int wait_time = 500; - for (;;) { - char command_buf[BUFSIZ]; - int rc = snprintf(command_buf, sizeof(command_buf), - "docker-machine ip %s", active_docker_machine); - if (rc > -1 && rc < BUFSIZ) { - return run_command(command_buf, ip_address_out); + for(;;) { + int ret = run_command(ip_address_out, "docker-machine ip %s", + active_docker_machine); + if(ret == 0) { + return 0; } - - if (attempt_no > 5) { + else if(attempt_no > 5) { fprintf( stderr, "Unable to get IP from docker-machine after %d attempts\n", @@ -171,107 +241,120 @@ static int ip_address_from_container(char *container_id, char **ip_address_out) return -1; } else { -#ifdef 
WIN32 -#pragma warning(push) -#pragma warning(disable : 4996) - _sleep(wait_time); -#pragma warning(pop) -#else - sleep(wait_time); -#endif + portable_sleep(wait_time); ++attempt_no; wait_time *= 2; } } } else { - char command_buf[BUFSIZ]; - int rc = snprintf( - command_buf, sizeof(command_buf), - "docker inspect --format \"{{ index (index (index " - ".NetworkSettings.Ports \\\"22/tcp\\\") 0) \\\"HostIp\\\" }}\" %s", - container_id); - if (rc > -1 && rc < BUFSIZ) { - return run_command(command_buf, ip_address_out); + if(is_running_inside_a_container()) { + return run_command(ip_address_out, + "docker inspect --format " + "\"{{ .NetworkSettings.IPAddress }}\"" + " %s", + container_id); } else { - return rc; + return run_command(ip_address_out, + "docker inspect --format " + "\"{{ index (index (index " + ".NetworkSettings.Ports " + "\\\"22/tcp\\\") 0) \\\"HostIp\\\" }}\" %s", + container_id); } } } static int port_from_container(char *container_id, char **port_out) { - char command_buf[BUFSIZ]; - int rc = snprintf( - command_buf, sizeof(command_buf), - "docker inspect --format \"{{ index (index (index " - ".NetworkSettings.Ports \\\"22/tcp\\\") 0) \\\"HostPort\\\" }}\" %s", - container_id); - if (rc > -1 && rc < BUFSIZ) { - return run_command(command_buf, port_out); + if(is_running_inside_a_container()) { + *port_out = strdup("22"); + return 0; } else { - return rc; + return run_command(port_out, + "docker inspect --format " + "\"{{ index (index (index .NetworkSettings.Ports " + "\\\"22/tcp\\\") 0) \\\"HostPort\\\" }}\" %s", + container_id); } } static int open_socket_to_container(char *container_id) { char *ip_address = NULL; + char *port_string = NULL; + unsigned long hostaddr; + int sock; + struct sockaddr_in sin; + int counter = 0; int ret = ip_address_from_container(container_id, &ip_address); - if (ret == 0) { - char *port_string = NULL; - ret = port_from_container(container_id, &port_string); - if (ret == 0) { - unsigned long hostaddr = inet_addr(ip_address); 
- if (hostaddr != (unsigned long)(-1)) { - int sock = socket(AF_INET, SOCK_STREAM, 0); - if (sock > -1) { - struct sockaddr_in sin; - - sin.sin_family = AF_INET; - sin.sin_port = htons((short)strtol(port_string, NULL, 0)); - sin.sin_addr.s_addr = hostaddr; - - if (connect(sock, (struct sockaddr *)(&sin), - sizeof(struct sockaddr_in)) == 0) { - ret = sock; - } - else { - fprintf(stderr, "Failed to connect to %s:%s\n", - ip_address, port_string); - ret = -1; - } - } - else { - fprintf(stderr, "Failed to open socket (%d)\n", sock); - ret = -1; - } - } - else { - fprintf(stderr, "Failed to convert %s host address\n", - ip_address); - ret = -1; - } + if(ret != 0) { + fprintf(stderr, "Failed to get IP address for container %s\n", + container_id); + ret = -1; + goto cleanup; + } - free(port_string); - } - else { - fprintf(stderr, "Failed to get port for container %s\n", - container_id); - ret = -1; - } + ret = port_from_container(container_id, &port_string); + if(ret != 0) { + fprintf(stderr, "Failed to get port for container %s\n", + container_id); + ret = -1; + } + /* 0.0.0.0 is returned by Docker for Windows, because the container + is reachable from anywhere. But we cannot connect to 0.0.0.0, + instead we assume localhost and try to connect to 127.0.0.1. 
*/ + if(ip_address && strcmp(ip_address, "0.0.0.0") == 0) { free(ip_address); + ip_address = strdup("127.0.0.1"); } - else { - fprintf(stderr, "Failed to get IP address for container %s\n", - container_id); + + hostaddr = inet_addr(ip_address); + if(hostaddr == (unsigned long)(-1)) { + fprintf(stderr, "Failed to convert %s host address\n", ip_address); + ret = -1; + goto cleanup; + } + + sock = socket(AF_INET, SOCK_STREAM, 0); + if(sock <= 0) { + fprintf(stderr, "Failed to open socket (%d)\n", sock); ret = -1; + goto cleanup; } + sin.sin_family = AF_INET; + sin.sin_port = htons((short)strtol(port_string, NULL, 0)); + sin.sin_addr.s_addr = hostaddr; + + for(counter = 0; counter < 3; ++counter) { + if(connect(sock, (struct sockaddr *)(&sin), + sizeof(struct sockaddr_in)) != 0) { + ret = -1; + fprintf(stderr, + "Connection to %s:%s attempt #%d failed: retrying...\n", + ip_address, port_string, counter); + portable_sleep(1 + 2*counter); + } + else { + ret = sock; + break; + } + } + if(ret == -1) { + fprintf(stderr, "Failed to connect to %s:%s\n", + ip_address, port_string); + goto cleanup; + } + +cleanup: + free(ip_address); + free(port_string); + return ret; } @@ -284,14 +367,14 @@ int start_openssh_fixture() WSADATA wsadata; ret = WSAStartup(MAKEWORD(2, 0), &wsadata); - if (ret != 0) { + if(ret != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", ret); return 1; } #endif ret = build_openssh_server_docker_image(); - if (ret == 0) { + if(ret == 0) { return start_openssh_server(&running_container_id); } else { @@ -302,7 +385,7 @@ int start_openssh_fixture() void stop_openssh_fixture() { - if (running_container_id) { + if(running_container_id) { stop_openssh_server(running_container_id); free(running_container_id); running_container_id = NULL; diff --git a/vendor/libssh2/tests/openssh_server/Dockerfile b/vendor/libssh2/tests/openssh_server/Dockerfile index 2848106263..c5ce2224d0 100644 --- a/vendor/libssh2/tests/openssh_server/Dockerfile +++ 
b/vendor/libssh2/tests/openssh_server/Dockerfile @@ -50,10 +50,27 @@ COPY ssh_host_rsa_key /tmp/etc/ssh/ssh_host_rsa_key RUN mv /tmp/etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_rsa_key RUN chmod 600 /etc/ssh/ssh_host_rsa_key +COPY ssh_host_ecdsa_key /tmp/etc/ssh/ssh_host_ecdsa_key +RUN mv /tmp/etc/ssh/ssh_host_ecdsa_key /etc/ssh/ssh_host_ecdsa_key +RUN chmod 600 /etc/ssh/ssh_host_ecdsa_key + +COPY ssh_host_ed25519_key /tmp/etc/ssh/ssh_host_ed25519_key +RUN mv /tmp/etc/ssh/ssh_host_ed25519_key /etc/ssh/ssh_host_ed25519_key +RUN chmod 600 /etc/ssh/ssh_host_ed25519_key + +COPY ca_ecdsa.pub /tmp/etc/ssh/ca_ecdsa.pub +RUN mv /tmp/etc/ssh/ca_ecdsa.pub /etc/ssh/ca_ecdsa.pub +RUN chmod 600 /etc/ssh/ca_ecdsa.pub + +COPY ca_ecdsa /tmp/etc/ssh/ca_ecdsa +RUN mv /tmp/etc/ssh/ca_ecdsa /etc/ssh/ca_ecdsa +RUN chmod 600 /etc/ssh/ca_ecdsa + RUN adduser --disabled-password --gecos 'Test user for libssh2 integration tests' libssh2 RUN echo 'libssh2:my test password' | chpasswd RUN sed -i 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/' /etc/ssh/sshd_config +RUN echo "TrustedUserCAKeys /etc/ssh/ca_ecdsa.pub" >> /etc/ssh/sshd_config # SSH login fix. 
Otherwise user is kicked off after login RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd diff --git a/vendor/libssh2/tests/openssh_server/authorized_keys b/vendor/libssh2/tests/openssh_server/authorized_keys index 3ae35e6d4b..cdd6eef505 100644 --- a/vendor/libssh2/tests/openssh_server/authorized_keys +++ b/vendor/libssh2/tests/openssh_server/authorized_keys @@ -1,2 +1,7 @@ ssh-dss AAAAB3NzaC1kc3MAAACBAK2Jh2Ck+8W1+LsFrjgOIH7XHySiONPSdG+faFTZprinh9cjyR3odzntVA7+UuFH14WnGM/ub6MbAXjrxDo1TzGILvW5x6nQ6hdLu7xFygihZ8sO1mIMOVqGdlNbTiYHl8XGjbLt1iXfW8ThM91LGGqmS+cgEiy0wWHYzsOXTDz9AAAAFQD/ebunYNTluoBrEYIoq3LMtQPbcwAAAIEAjPBzkUKcmfMAmb0eO/QAVXmX+L8NC6Vn2m4QguQ2IcJ8NH6VMnxXEBHsnemCOa9jN55G+LnX17PViuKS0O3rqQiSdA5wcHyCHKBT519/v1KQNymDwudfnFvdxUyAAG6MDSxKlpbXDCbrhFd2+ahC9a7rKalRPSXR0R2hhWRvjK0AAACAJ+CGwV/1S4j1GVwa6pSP0nj4V86GWXosTTBg7GT+rKWu8lrxIcr6FzLWgFi/gHoMrgnKWGxO1yF7vkoYM5Yfo84oBYiH+MgpiBuOrZrgzacHsA66JJbUfrESRFWZl2blIPr6Gyjj6cVGgMabK3yCiTRi0v7hwffpm0rKyKv7Goo= awl03@bounty ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnak1T7zHJ+hVRFBDQ9pf1KVzmd5gaNc7y7NPmL13aOG3sYeJevi1x1WM/R3tb8XnUnzZUX9GJN0MYovvZsw9bknG1mDP72LFbGp/gzPddGIKHBBpvceDaJ85sM/ME3XOtD7uuXQuNAuEHwEzSMMiSIEMcQS+lXIcMLr5xPLEkyNvqsO5RqSjMTLHKHgY8gLWx7oQ1avokhwuDxF7P3Pqtj+rW2Te6vR0i1H6EyFPsBkzkgNXb33cus8M1CnTmYTSgJgmHO2LLcGpjQ5sL8T/PWIWHaSqTnkrFXEMysgoteXnAYILjzyBaqq2WV4KA3TluGdAP2p8gC32QtKmIuis3Q== awl03@bounty +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC92YlGoc4PJy6DzX916JJZhxkvmkWBLGJdWOL7R9B6iaYEKebBxzTE3P1RcnxnuI06kklVq/KcDP9sLlgawTZcDg7ifM7HncPOi18OON8vvVVzodikHzuupjhpI5YTT9wwV2fDVi2URsBjvX4AFiZ5WM3/NwqdKpYABzWieBikXGJ58Tsnw+zQw2qMmKKESBuzSN538loTAj5iEH/GAKYDbbH9t2a17qhNCNEw4vrtURT9JqwO1cOg7N1OKpmqCPEbK0wuSTljNC230VJ06X/8UqahWWSH6MreGy6gwpPi6i9wFiFLur301R0dTPiKVhz6bguhcC1EAlhSgjfelFJt awl03@bounty +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDTe1lN2L/yet0Ma1JzXkQf3t1f+pauALec2FsGZy87KRJW1AOxcTTiePjlFwP1yfSK1lWXQ+uf0b61gkKqqR52FDky24HJWuYlfXlEQMn2d/PNDNVDDbO4TXKyNxxUHFJ6qYMNd4kWjOH+6rmYoWKsWV+3mDRbHagbVPEYL8wep8OTqKOqruVLVPzZyYZkBtn4XOFi6UE8WKiSVdK1Am1O5UxvlD95t32eYch6wQ9azgMqja6spe/L5UJgP83QZFknVC3wPZWkjqomVFql0FpaQclENwyY/OZMxr0cT/f7bCL6s4A/1XpbsGmC0xak4/THHbOn+0LdIej2nGV8JFoR +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIxtdyg2ZRXE70UwyPVUH3UyfDBV8GX5cPF636P6hjom +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICHxEyUTOVHXvdMFARedFQ+H9DW/n8Zy3daKKRqnTDMq +ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBOiyJJwf+hFJrOkik9IQVxjhhVnI1vKl4BGLj03erLvw4TXXwoZzlPx6J936fU9JvvPw+d8PA4viMEfFbmdbHREKhwi3u1vZrfj0mB8SXw96AcZwX8PAL556wgom+/Qx+Q== diff --git a/vendor/libssh2/tests/openssh_server/ca_ecdsa b/vendor/libssh2/tests/openssh_server/ca_ecdsa new file mode 100644 index 0000000000..d6b670c585 --- /dev/null +++ b/vendor/libssh2/tests/openssh_server/ca_ecdsa @@ -0,0 +1,12 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAArAAAABNlY2RzYS +1zaGEyLW5pc3RwNTIxAAAACG5pc3RwNTIxAAAAhQQAfv15s+G2xg56J+audKAM4G9qOTFr +bZRo0CTwvkb/oHrf9/2RSWqYsx/0m5mYCZVlecnZqwRHAOolXbc/Yb4cGjsALUj3UDirsn +YR7Ve+SwnunkpvW/H3a98sA3sS+HCpd5RbpfWClSBOI9JEAlPtS1CrEQ7EmO7hmlFOH2cL +0qfHCyYAAAEA763VSe+t1UkAAAATZWNkc2Etc2hhMi1uaXN0cDUyMQAAAAhuaXN0cDUyMQ +AAAIUEAH79ebPhtsYOeifmrnSgDOBvajkxa22UaNAk8L5G/6B63/f9kUlqmLMf9JuZmAmV +ZXnJ2asERwDqJV23P2G+HBo7AC1I91A4q7J2Ee1XvksJ7p5Kb1vx92vfLAN7EvhwqXeUW6 +X1gpUgTiPSRAJT7UtQqxEOxJju4ZpRTh9nC9KnxwsmAAAAQgD8VJwi9RHYN13CAfhvdmjW +xVjH55J5jDjPlENU2Z+cnm01SQ+9mPFEY4wDSvfiovD1VstNJX/P97WbHw+e5XL+HwAAAA +JDQQ== +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/openssh_server/ca_ecdsa.pub b/vendor/libssh2/tests/openssh_server/ca_ecdsa.pub new file mode 100644 index 0000000000..5086eabe43 --- /dev/null +++ b/vendor/libssh2/tests/openssh_server/ca_ecdsa.pub @@ -0,0 +1 @@ +ecdsa-sha2-nistp521 
AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAB+/Xmz4bbGDnon5q50oAzgb2o5MWttlGjQJPC+Rv+get/3/ZFJapizH/SbmZgJlWV5ydmrBEcA6iVdtz9hvhwaOwAtSPdQOKuydhHtV75LCe6eSm9b8fdr3ywDexL4cKl3lFul9YKVIE4j0kQCU+1LUKsRDsSY7uGaUU4fZwvSp8cLJg== CA diff --git a/vendor/libssh2/tests/openssh_server/ssh_host_ecdsa_key b/vendor/libssh2/tests/openssh_server/ssh_host_ecdsa_key new file mode 100644 index 0000000000..0164b523ef --- /dev/null +++ b/vendor/libssh2/tests/openssh_server/ssh_host_ecdsa_key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIKdqGrp+52U1ehslMI4fX0cmvgHFmKSkMzQGmj6B07ecoAoGCCqGSM49 +AwEHoUQDQgAEL7+zLJ4okP10LZkf1DuIkZF5HhgzetQIyxLKeTJeiN19IKUYIxjs +m9aW3fQRKNi/GhN9JEbHpa9qpgr+8+hhDg== +-----END EC PRIVATE KEY----- diff --git a/vendor/libssh2/tests/openssh_server/ssh_host_ed25519_key b/vendor/libssh2/tests/openssh_server/ssh_host_ed25519_key new file mode 100644 index 0000000000..bfb1ad566d --- /dev/null +++ b/vendor/libssh2/tests/openssh_server/ssh_host_ed25519_key @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACCMbXcoNmUVxO9FMMj1VB91MnwwVfBl+XDxet+j+oY6JgAAAJg8nvUxPJ71 +MQAAAAtzc2gtZWQyNTUxOQAAACCMbXcoNmUVxO9FMMj1VB91MnwwVfBl+XDxet+j+oY6Jg +AAAECnhCuTDYdz3kUn48BXkaCXXdbKdH7wSIQ/CUx1cbnR0Ixtdyg2ZRXE70UwyPVUH3Uy +fDBV8GX5cPF636P6hjomAAAAEHdpbGxAaUN1YmUubG9jYWwBAgMEBQ== +-----END OPENSSH PRIVATE KEY----- diff --git a/vendor/libssh2/tests/ossfuzz/Makefile.am b/vendor/libssh2/tests/ossfuzz/Makefile.am new file mode 100644 index 0000000000..a7e95825a9 --- /dev/null +++ b/vendor/libssh2/tests/ossfuzz/Makefile.am @@ -0,0 +1,32 @@ +AM_CPPFLAGS = -I$(top_builddir)/include +LDADD = $(top_builddir)/src/libssh2.la + +if USE_OSSFUZZ_FLAG +FUZZ_FLAG = $(LIB_FUZZING_ENGINE) +else +if USE_OSSFUZZ_STATIC +LDADD += $(LIB_FUZZING_ENGINE) +FUZZ_FLAG = +else +LDADD += libstandaloneengine.a +FUZZ_FLAG = +endif +endif + +noinst_PROGRAMS = +noinst_LIBRARIES = + +if USE_OSSFUZZERS 
+noinst_PROGRAMS += \ + ssh2_client_fuzzer + +noinst_LIBRARIES += \ + libstandaloneengine.a +endif + +ssh2_client_fuzzer_SOURCES = ssh2_client_fuzzer.cc testinput.h +ssh2_client_fuzzer_CXXFLAGS = $(AM_CXXFLAGS) $(FUZZ_FLAG) +ssh2_client_fuzzer_LDFLAGS = $(AM_LDFLAGS) -static + +libstandaloneengine_a_SOURCES = standaloneengine.cc +libstandaloneengine_a_CXXFLAGS = $(AM_CXXFLAGS) diff --git a/vendor/libssh2/tests/ossfuzz/Makefile.in b/vendor/libssh2/tests/ossfuzz/Makefile.in new file mode 100644 index 0000000000..9fa6d259c6 --- /dev/null +++ b/vendor/libssh2/tests/ossfuzz/Makefile.in @@ -0,0 +1,731 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@USE_OSSFUZZ_FLAG_FALSE@@USE_OSSFUZZ_STATIC_TRUE@am__append_1 = $(LIB_FUZZING_ENGINE) +@USE_OSSFUZZ_FLAG_FALSE@@USE_OSSFUZZ_STATIC_FALSE@am__append_2 = libstandaloneengine.a +noinst_PROGRAMS = $(am__EXEEXT_1) 
+@USE_OSSFUZZERS_TRUE@am__append_3 = \ +@USE_OSSFUZZERS_TRUE@ ssh2_client_fuzzer + +@USE_OSSFUZZERS_TRUE@am__append_4 = \ +@USE_OSSFUZZERS_TRUE@ libstandaloneengine.a + +subdir = tests/ossfuzz +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/autobuild.m4 \ + $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \ + $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/acinclude.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/src/libssh2_config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +@USE_OSSFUZZERS_TRUE@am__EXEEXT_1 = ssh2_client_fuzzer$(EXEEXT) +PROGRAMS = $(noinst_PROGRAMS) +LIBRARIES = $(noinst_LIBRARIES) +ARFLAGS = cru +AM_V_AR = $(am__v_AR_@AM_V@) +am__v_AR_ = $(am__v_AR_@AM_DEFAULT_V@) +am__v_AR_0 = @echo " AR " $@; +am__v_AR_1 = +libstandaloneengine_a_AR = $(AR) $(ARFLAGS) +libstandaloneengine_a_LIBADD = +am_libstandaloneengine_a_OBJECTS = \ + libstandaloneengine_a-standaloneengine.$(OBJEXT) +libstandaloneengine_a_OBJECTS = $(am_libstandaloneengine_a_OBJECTS) +am_ssh2_client_fuzzer_OBJECTS = \ + ssh2_client_fuzzer-ssh2_client_fuzzer.$(OBJEXT) +ssh2_client_fuzzer_OBJECTS = $(am_ssh2_client_fuzzer_OBJECTS) +ssh2_client_fuzzer_LDADD = $(LDADD) +am__DEPENDENCIES_1 = +@USE_OSSFUZZ_FLAG_FALSE@@USE_OSSFUZZ_STATIC_TRUE@am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) +ssh2_client_fuzzer_DEPENDENCIES = $(top_builddir)/src/libssh2.la \ + $(am__DEPENDENCIES_2) $(am__append_2) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +ssh2_client_fuzzer_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link 
$(CXXLD) \ + $(ssh2_client_fuzzer_CXXFLAGS) $(CXXFLAGS) \ + $(ssh2_client_fuzzer_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/src +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = \ + ./$(DEPDIR)/libstandaloneengine_a-standaloneengine.Po \ + ./$(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + 
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libstandaloneengine_a_SOURCES) \ + $(ssh2_client_fuzzer_SOURCES) +DIST_SOURCES = $(libstandaloneengine_a_SOURCES) \ + $(ssh2_client_fuzzer_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +ALLOCA = @ALLOCA@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_LIBBCRYPT = @HAVE_LIBBCRYPT@ +HAVE_LIBCRYPT32 = @HAVE_LIBCRYPT32@ +HAVE_LIBGCRYPT = @HAVE_LIBGCRYPT@ +HAVE_LIBMBEDCRYPTO = @HAVE_LIBMBEDCRYPTO@ +HAVE_LIBSSL = @HAVE_LIBSSL@ +HAVE_LIBZ = @HAVE_LIBZ@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBBCRYPT = @LIBBCRYPT@ +LIBBCRYPT_PREFIX = @LIBBCRYPT_PREFIX@ +LIBCRYPT32 = @LIBCRYPT32@ +LIBCRYPT32_PREFIX = @LIBCRYPT32_PREFIX@ +LIBGCRYPT = @LIBGCRYPT@ +LIBGCRYPT_PREFIX = @LIBGCRYPT_PREFIX@ +LIBMBEDCRYPTO = @LIBMBEDCRYPTO@ +LIBMBEDCRYPTO_PREFIX = @LIBMBEDCRYPTO_PREFIX@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBSREQUIRED = @LIBSREQUIRED@ +LIBSSH2VER = @LIBSSH2VER@ +LIBSSL = @LIBSSL@ +LIBSSL_PREFIX = @LIBSSL_PREFIX@ +LIBTOOL = @LIBTOOL@ +LIBZ = @LIBZ@ +LIBZ_PREFIX = @LIBZ_PREFIX@ +LIB_FUZZING_ENGINE = @LIB_FUZZING_ENGINE@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBBCRYPT = 
@LTLIBBCRYPT@ +LTLIBCRYPT32 = @LTLIBCRYPT32@ +LTLIBGCRYPT = @LTLIBGCRYPT@ +LTLIBMBEDCRYPTO = @LTLIBMBEDCRYPTO@ +LTLIBOBJS = @LTLIBOBJS@ +LTLIBSSL = @LTLIBSSL@ +LTLIBZ = @LTLIBZ@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SSHD = @SSHD@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ 
+sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AM_CPPFLAGS = -I$(top_builddir)/include +LDADD = $(top_builddir)/src/libssh2.la $(am__append_1) $(am__append_2) +@USE_OSSFUZZ_FLAG_FALSE@@USE_OSSFUZZ_STATIC_FALSE@FUZZ_FLAG = +@USE_OSSFUZZ_FLAG_FALSE@@USE_OSSFUZZ_STATIC_TRUE@FUZZ_FLAG = +@USE_OSSFUZZ_FLAG_TRUE@FUZZ_FLAG = $(LIB_FUZZING_ENGINE) +noinst_LIBRARIES = $(am__append_4) +ssh2_client_fuzzer_SOURCES = ssh2_client_fuzzer.cc testinput.h +ssh2_client_fuzzer_CXXFLAGS = $(AM_CXXFLAGS) $(FUZZ_FLAG) +ssh2_client_fuzzer_LDFLAGS = $(AM_LDFLAGS) -static +libstandaloneengine_a_SOURCES = standaloneengine.cc +libstandaloneengine_a_CXXFLAGS = $(AM_CXXFLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/ossfuzz/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign tests/ossfuzz/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +clean-noinstPROGRAMS: + @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +clean-noinstLIBRARIES: + -test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES) + +libstandaloneengine.a: $(libstandaloneengine_a_OBJECTS) $(libstandaloneengine_a_DEPENDENCIES) $(EXTRA_libstandaloneengine_a_DEPENDENCIES) + $(AM_V_at)-rm -f libstandaloneengine.a + $(AM_V_AR)$(libstandaloneengine_a_AR) libstandaloneengine.a $(libstandaloneengine_a_OBJECTS) $(libstandaloneengine_a_LIBADD) + $(AM_V_at)$(RANLIB) libstandaloneengine.a + +ssh2_client_fuzzer$(EXEEXT): $(ssh2_client_fuzzer_OBJECTS) $(ssh2_client_fuzzer_DEPENDENCIES) $(EXTRA_ssh2_client_fuzzer_DEPENDENCIES) + @rm -f ssh2_client_fuzzer$(EXEEXT) + $(AM_V_CXXLD)$(ssh2_client_fuzzer_LINK) $(ssh2_client_fuzzer_OBJECTS) $(ssh2_client_fuzzer_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libstandaloneengine_a-standaloneengine.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.cc.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +libstandaloneengine_a-standaloneengine.o: standaloneengine.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libstandaloneengine_a_CXXFLAGS) $(CXXFLAGS) -MT libstandaloneengine_a-standaloneengine.o -MD -MP -MF $(DEPDIR)/libstandaloneengine_a-standaloneengine.Tpo -c -o libstandaloneengine_a-standaloneengine.o `test -f 
'standaloneengine.cc' || echo '$(srcdir)/'`standaloneengine.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libstandaloneengine_a-standaloneengine.Tpo $(DEPDIR)/libstandaloneengine_a-standaloneengine.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='standaloneengine.cc' object='libstandaloneengine_a-standaloneengine.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libstandaloneengine_a_CXXFLAGS) $(CXXFLAGS) -c -o libstandaloneengine_a-standaloneengine.o `test -f 'standaloneengine.cc' || echo '$(srcdir)/'`standaloneengine.cc + +libstandaloneengine_a-standaloneengine.obj: standaloneengine.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libstandaloneengine_a_CXXFLAGS) $(CXXFLAGS) -MT libstandaloneengine_a-standaloneengine.obj -MD -MP -MF $(DEPDIR)/libstandaloneengine_a-standaloneengine.Tpo -c -o libstandaloneengine_a-standaloneengine.obj `if test -f 'standaloneengine.cc'; then $(CYGPATH_W) 'standaloneengine.cc'; else $(CYGPATH_W) '$(srcdir)/standaloneengine.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libstandaloneengine_a-standaloneengine.Tpo $(DEPDIR)/libstandaloneengine_a-standaloneengine.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='standaloneengine.cc' object='libstandaloneengine_a-standaloneengine.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libstandaloneengine_a_CXXFLAGS) $(CXXFLAGS) -c -o libstandaloneengine_a-standaloneengine.obj `if test -f 'standaloneengine.cc'; then $(CYGPATH_W) 'standaloneengine.cc'; else $(CYGPATH_W) '$(srcdir)/standaloneengine.cc'; 
fi` + +ssh2_client_fuzzer-ssh2_client_fuzzer.o: ssh2_client_fuzzer.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ssh2_client_fuzzer_CXXFLAGS) $(CXXFLAGS) -MT ssh2_client_fuzzer-ssh2_client_fuzzer.o -MD -MP -MF $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Tpo -c -o ssh2_client_fuzzer-ssh2_client_fuzzer.o `test -f 'ssh2_client_fuzzer.cc' || echo '$(srcdir)/'`ssh2_client_fuzzer.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Tpo $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ssh2_client_fuzzer.cc' object='ssh2_client_fuzzer-ssh2_client_fuzzer.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ssh2_client_fuzzer_CXXFLAGS) $(CXXFLAGS) -c -o ssh2_client_fuzzer-ssh2_client_fuzzer.o `test -f 'ssh2_client_fuzzer.cc' || echo '$(srcdir)/'`ssh2_client_fuzzer.cc + +ssh2_client_fuzzer-ssh2_client_fuzzer.obj: ssh2_client_fuzzer.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ssh2_client_fuzzer_CXXFLAGS) $(CXXFLAGS) -MT ssh2_client_fuzzer-ssh2_client_fuzzer.obj -MD -MP -MF $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Tpo -c -o ssh2_client_fuzzer-ssh2_client_fuzzer.obj `if test -f 'ssh2_client_fuzzer.cc'; then $(CYGPATH_W) 'ssh2_client_fuzzer.cc'; else $(CYGPATH_W) '$(srcdir)/ssh2_client_fuzzer.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Tpo $(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='ssh2_client_fuzzer.cc' object='ssh2_client_fuzzer-ssh2_client_fuzzer.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ 
DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ssh2_client_fuzzer_CXXFLAGS) $(CXXFLAGS) -c -o ssh2_client_fuzzer-ssh2_client_fuzzer.obj `if test -f 'ssh2_client_fuzzer.cc'; then $(CYGPATH_W) 'ssh2_client_fuzzer.cc'; else $(CYGPATH_W) '$(srcdir)/ssh2_client_fuzzer.cc'; fi` + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo 
"$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) $(LIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLIBRARIES \ + clean-noinstPROGRAMS mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/libstandaloneengine_a-standaloneengine.Po + -rm -f ./$(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/libstandaloneengine_a-standaloneengine.Po + -rm -f ./$(DEPDIR)/ssh2_client_fuzzer-ssh2_client_fuzzer.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-generic clean-libtool clean-noinstLIBRARIES \ + clean-noinstPROGRAMS cscopelist-am ctags ctags-am distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + 
install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/vendor/libssh2/tests/ossfuzz/ssh2_client_fuzzer.cc b/vendor/libssh2/tests/ossfuzz/ssh2_client_fuzzer.cc new file mode 100644 index 0000000000..d9f5ab516c --- /dev/null +++ b/vendor/libssh2/tests/ossfuzz/ssh2_client_fuzzer.cc @@ -0,0 +1,90 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "testinput.h" + +#define FUZZ_ASSERT(COND) \ + if(!(COND)) \ + { \ + fprintf(stderr, "Assertion failed: " #COND "\n%s", \ + strerror(errno)); \ + assert((COND)); \ + } + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) +{ + int socket_fds[2] = {-1, -1}; + ssize_t written; + int rc; + LIBSSH2_SESSION *session = NULL; + int handshake_completed = 0; + + rc = libssh2_init(0); + + if(rc != 0) { + fprintf(stderr, "libssh2 initialization failed (%d)\n", rc); + goto EXIT_LABEL; + } + + // Create a socket pair so data can be sent in. + rc = socketpair(AF_UNIX, SOCK_STREAM, 0, socket_fds); + FUZZ_ASSERT(rc == 0); + + written = send(socket_fds[1], data, size, 0); + + if (written != size) + { + // Handle whatever error case we're in. + fprintf(stderr, "send() of %zu bytes returned %zu (%d)\n", + size, + written, + errno); + goto EXIT_LABEL; + } + + rc = shutdown(socket_fds[1], SHUT_WR); + if (rc != 0) + { + fprintf(stderr, "socket shutdown failed (%d)\n", rc); + goto EXIT_LABEL; + } + + // Create a session and start the handshake using the fuzz data passed in. 
+ session = libssh2_session_init(); + if(session) { + libssh2_session_set_blocking(session, 1); + } + + if(libssh2_session_handshake(session, socket_fds[0])) { + goto EXIT_LABEL; + } + + // If we get here the handshake actually completed. + handshake_completed = 1; + +EXIT_LABEL: + + if (session != NULL) + { + if (handshake_completed) + { + libssh2_session_disconnect(session, + "Normal Shutdown, Thank you for playing"); + } + + libssh2_session_free(session); + } + + libssh2_exit(); + + close(socket_fds[0]); + close(socket_fds[1]); + + return 0; +} diff --git a/vendor/libssh2/tests/ossfuzz/standaloneengine.cc b/vendor/libssh2/tests/ossfuzz/standaloneengine.cc new file mode 100644 index 0000000000..175360e4a2 --- /dev/null +++ b/vendor/libssh2/tests/ossfuzz/standaloneengine.cc @@ -0,0 +1,74 @@ +#include +#include +#include + +#include "testinput.h" + +/** + * Main procedure for standalone fuzzing engine. + * + * Reads filenames from the argument array. For each filename, read the file + * into memory and then call the fuzzing interface with the data. + */ +int main(int argc, char **argv) +{ + int ii; + for(ii = 1; ii < argc; ii++) + { + FILE *infile; + printf("[%s] ", argv[ii]); + + /* Try and open the file. */ + infile = fopen(argv[ii], "rb"); + if(infile) + { + uint8_t *buffer = NULL; + size_t buffer_len; + + printf("Opened.. "); + + /* Get the length of the file. */ + fseek(infile, 0L, SEEK_END); + buffer_len = ftell(infile); + + /* Reset the file indicator to the beginning of the file. */ + fseek(infile, 0L, SEEK_SET); + + /* Allocate a buffer for the file contents. */ + buffer = (uint8_t *)calloc(buffer_len, sizeof(uint8_t)); + if(buffer) + { + /* Read all the text from the file into the buffer. */ + fread(buffer, sizeof(uint8_t), buffer_len, infile); + printf("Read %zu bytes, fuzzing.. ", buffer_len); + + /* Call the fuzzer with the data. */ + LLVMFuzzerTestOneInput(buffer, buffer_len); + + printf("complete !!"); + + /* Free the buffer as it's no longer needed. 
*/ + free(buffer); + buffer = NULL; + } + else + { + fprintf(stderr, + "[%s] Failed to allocate %zu bytes \n", + argv[ii], + buffer_len); + } + + /* Close the file as it's no longer needed. */ + fclose(infile); + infile = NULL; + } + else + { + /* Failed to open the file. Maybe wrong name or wrong permissions? */ + fprintf(stderr, "[%s] Open failed. \n", argv[ii]); + } + + printf("\n"); + } +} diff --git a/vendor/libssh2/tests/ossfuzz/testinput.h b/vendor/libssh2/tests/ossfuzz/testinput.h new file mode 100644 index 0000000000..6ab9b515e4 --- /dev/null +++ b/vendor/libssh2/tests/ossfuzz/testinput.h @@ -0,0 +1,3 @@ +#include + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); diff --git a/vendor/libssh2/tests/runner.c b/vendor/libssh2/tests/runner.c index efd05ef4da..b9f9328df7 100644 --- a/vendor/libssh2/tests/runner.c +++ b/vendor/libssh2/tests/runner.c @@ -43,7 +43,7 @@ int main() { int exit_code = 1; LIBSSH2_SESSION *session = start_session_fixture(); - if (session != NULL) { + if(session != NULL) { exit_code = (test(session) == 0) ? 
0 : 1; } stop_session_fixture(); diff --git a/vendor/libssh2/tests/session_fixture.c b/vendor/libssh2/tests/session_fixture.c index 6985275e60..5d8fd2156e 100644 --- a/vendor/libssh2/tests/session_fixture.c +++ b/vendor/libssh2/tests/session_fixture.c @@ -40,6 +40,10 @@ #include "openssh_fixture.h" #include +#include +#ifdef HAVE_UNISTD_H +#include +#endif #ifdef HAVE_WINDOWS_H #include @@ -50,63 +54,85 @@ #ifdef HAVE_SYS_SOCKET_H #include #endif +#ifdef HAVE_SYS_PARAM_H +#include +#endif LIBSSH2_SESSION *connected_session = NULL; int connected_socket = -1; static int connect_to_server() { + int rc; connected_socket = open_socket_to_openssh_server(); - if (connected_socket > -1) { - int rc = libssh2_session_handshake(connected_session, connected_socket); - if (rc == 0) { - return 0; - } - else { - print_last_session_error("libssh2_session_handshake"); - return -1; - } + if(connected_socket <= 0) { + return -1; } - else { + + rc = libssh2_session_handshake(connected_session, connected_socket); + if(rc != 0) { + print_last_session_error("libssh2_session_handshake"); return -1; } + + return 0; +} + +void setup_fixture_workdir() +{ + char *wd = getenv("FIXTURE_WORKDIR"); +#ifdef FIXTURE_WORKDIR + if(!wd) { + wd = FIXTURE_WORKDIR; + } +#endif + if(!wd) { +#ifdef WIN32 + char wd_buf[_MAX_PATH]; +#else + char wd_buf[MAXPATHLEN]; +#endif + getcwd(wd_buf, sizeof(wd_buf)); + wd = wd_buf; + } + + chdir(wd); } LIBSSH2_SESSION *start_session_fixture() { - int rc = start_openssh_fixture(); - if (rc == 0) { - rc = libssh2_init(0); - if (rc == 0) { - connected_session = libssh2_session_init_ex(NULL, NULL, NULL, NULL); - libssh2_session_set_blocking(connected_session, 1); - if (connected_session != NULL) { - rc = connect_to_server(); - if (rc == 0) { - return connected_session; - } - else { - return NULL; - } - } - else { - fprintf(stderr, "libssh2_session_init_ex failed\n"); - return NULL; - } - } - else { - fprintf(stderr, "libssh2_init failed (%d)\n", rc); - return NULL; - } + 
int rc; + + setup_fixture_workdir(); + + rc = start_openssh_fixture(); + if(rc != 0) { + return NULL; } - else { + rc = libssh2_init(0); + if(rc != 0) { + fprintf(stderr, "libssh2_init failed (%d)\n", rc); + return NULL; + } + + connected_session = libssh2_session_init_ex(NULL, NULL, NULL, NULL); + libssh2_session_set_blocking(connected_session, 1); + if(connected_session == NULL) { + fprintf(stderr, "libssh2_session_init_ex failed\n"); + return NULL; + } + + rc = connect_to_server(); + if(rc != 0) { return NULL; } + + return connected_session; } void print_last_session_error(const char *function) { - if (connected_session) { + if(connected_session) { char *message; int rc = libssh2_session_last_error(connected_session, &message, NULL, 0); @@ -119,7 +145,7 @@ void print_last_session_error(const char *function) void stop_session_fixture() { - if (connected_session) { + if(connected_session) { libssh2_session_disconnect(connected_session, "test ended"); libssh2_session_free(connected_session); shutdown(connected_socket, 2); diff --git a/vendor/libssh2/tests/simple.c b/vendor/libssh2/tests/simple.c index 0a5b03c02c..e97f7d3398 100644 --- a/vendor/libssh2/tests/simple.c +++ b/vendor/libssh2/tests/simple.c @@ -41,28 +41,27 @@ #include "libssh2.h" -static int test_libssh2_base64_decode (LIBSSH2_SESSION *session) +static int test_libssh2_base64_decode(LIBSSH2_SESSION *session) { char *data; unsigned int datalen; const char *src = "Zm5vcmQ="; - unsigned int src_len = strlen (src); + unsigned int src_len = strlen(src); int ret; ret = libssh2_base64_decode(session, &data, &datalen, src, src_len); - if (ret) + if(ret) return ret; - if (datalen != 5 || strcmp (data, "fnord") != 0) - { - fprintf (stderr, - "libssh2_base64_decode() failed (%d, %.*s)\n", - datalen, datalen, data); + if(datalen != 5 || strcmp(data, "fnord") != 0) { + fprintf(stderr, + "libssh2_base64_decode() failed (%d, %.*s)\n", + datalen, datalen, data); return 1; } - free (data); + free(data); return 0; } @@ 
-74,25 +73,23 @@ int main(int argc, char *argv[]) (void)argv; (void)argc; - rc = libssh2_init (LIBSSH2_INIT_NO_CRYPTO); - if (rc != 0) - { - fprintf (stderr, "libssh2_init() failed: %d\n", rc); + rc = libssh2_init(LIBSSH2_INIT_NO_CRYPTO); + if(rc != 0) { + fprintf(stderr, "libssh2_init() failed: %d\n", rc); return 1; } session = libssh2_session_init(); - if (!session) - { - fprintf (stderr, "libssh2_session_init() failed\n"); + if(!session) { + fprintf(stderr, "libssh2_session_init() failed\n"); return 1; } - test_libssh2_base64_decode (session); + test_libssh2_base64_decode(session); libssh2_session_free(session); - libssh2_exit (); + libssh2_exit(); return 0; } diff --git a/vendor/libssh2/tests/ssh2.c b/vendor/libssh2/tests/ssh2.c index f8b6a0d5e2..f903e07585 100644 --- a/vendor/libssh2/tests/ssh2.c +++ b/vendor/libssh2/tests/ssh2.c @@ -39,18 +39,18 @@ int main(int argc, char *argv[]) char *userauthlist; LIBSSH2_SESSION *session; LIBSSH2_CHANNEL *channel; - const char *pubkeyfile="etc/user.pub"; - const char *privkeyfile="etc/user"; - const char *username="username"; - const char *password="password"; + const char *pubkeyfile = "etc/user.pub"; + const char *privkeyfile = "etc/user"; + const char *username = "username"; + const char *password = "password"; int ec = 1; #ifdef WIN32 WSADATA wsadata; int err; - err = WSAStartup(MAKEWORD(2,0), &wsadata); - if (err != 0) { + err = WSAStartup(MAKEWORD(2, 0), &wsadata); + if(err != 0) { fprintf(stderr, "WSAStartup failed with error: %d\n", err); return -1; } @@ -59,14 +59,14 @@ int main(int argc, char *argv[]) (void)argc; (void)argv; - if (getenv ("USER")) - username = getenv ("USER"); + if(getenv("USER")) + username = getenv("USER"); - if (getenv ("PRIVKEY")) - privkeyfile = getenv ("PRIVKEY"); + if(getenv ("PRIVKEY")) + privkeyfile = getenv("PRIVKEY"); - if (getenv ("PUBKEY")) - pubkeyfile = getenv ("PUBKEY"); + if(getenv("PUBKEY")) + pubkeyfile = getenv("PUBKEY"); hostaddr = htonl(0x7F000001); @@ -77,24 +77,27 @@ int 
main(int argc, char *argv[]) sin.sin_family = AF_INET; sin.sin_port = htons(4711); sin.sin_addr.s_addr = hostaddr; - if (connect(sock, (struct sockaddr*)(&sin), + if(connect(sock, (struct sockaddr*)(&sin), sizeof(struct sockaddr_in)) != 0) { fprintf(stderr, "failed to connect!\n"); return 1; } /* Create a session instance and start it up - * This will trade welcome banners, exchange keys, and setup crypto, compression, and MAC layers + * This will trade welcome banners, exchange keys, + * and setup crypto, compression, and MAC layers */ session = libssh2_session_init(); - if (libssh2_session_startup(session, sock)) { + if(libssh2_session_startup(session, sock)) { fprintf(stderr, "Failure establishing SSH session\n"); return 1; } - /* At this point we havn't authenticated, - * The first thing to do is check the hostkey's fingerprint against our known hosts - * Your app may have it hard coded, may go to a file, may present it to the user, that's your call + /* At this point we haven't authenticated, + * The first thing to do is check the hostkey's + * fingerprint against our known hosts + * Your app may have it hard coded, may go to a file, + * may present it to the user, that's your call */ fingerprint = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA1); printf("Fingerprint: "); @@ -106,31 +109,35 @@ int main(int argc, char *argv[]) /* check what authentication methods are available */ userauthlist = libssh2_userauth_list(session, username, strlen(username)); printf("Authentication methods: %s\n", userauthlist); - if (strstr(userauthlist, "password") != NULL) { + if(strstr(userauthlist, "password") != NULL) { auth_pw |= 1; } - if (strstr(userauthlist, "keyboard-interactive") != NULL) { + if(strstr(userauthlist, "keyboard-interactive") != NULL) { auth_pw |= 2; } - if (strstr(userauthlist, "publickey") != NULL) { + if(strstr(userauthlist, "publickey") != NULL) { auth_pw |= 4; } - if (auth_pw & 4) { + if(auth_pw & 4) { /* Authenticate by public key */ - if 
(libssh2_userauth_publickey_fromfile(session, username, pubkeyfile, privkeyfile, password)) { + if(libssh2_userauth_publickey_fromfile(session, username, pubkeyfile, + privkeyfile, password)) { printf("\tAuthentication by public key failed!\n"); goto shutdown; - } else { + } + else { printf("\tAuthentication by public key succeeded.\n"); } - } else { + } + else { printf("No supported authentication methods found!\n"); goto shutdown; } /* Request a shell */ - if (!(channel = libssh2_channel_open_session(session))) { + channel = libssh2_channel_open_session(session); + if(!channel) { fprintf(stderr, "Unable to open a session\n"); goto shutdown; } @@ -143,13 +150,13 @@ int main(int argc, char *argv[]) /* Request a terminal with 'vanilla' terminal emulation * See /etc/termcap for more options */ - if (libssh2_channel_request_pty(channel, "vanilla")) { + if(libssh2_channel_request_pty(channel, "vanilla")) { fprintf(stderr, "Failed requesting pty\n"); goto skip_shell; } /* Open a SHELL on that pty */ - if (libssh2_channel_shell(channel)) { + if(libssh2_channel_shell(channel)) { fprintf(stderr, "Unable to request shell on allocated pty\n"); goto shutdown; } @@ -157,7 +164,7 @@ int main(int argc, char *argv[]) ec = 0; skip_shell: - if (channel) { + if(channel) { libssh2_channel_free(channel); channel = NULL; } diff --git a/vendor/libssh2/tests/test_agent_forward_succeeds.c b/vendor/libssh2/tests/test_agent_forward_succeeds.c new file mode 100644 index 0000000000..daf7bd5ac1 --- /dev/null +++ b/vendor/libssh2/tests/test_agent_forward_succeeds.c @@ -0,0 +1,51 @@ +#include "session_fixture.h" + +#include + +#include + +const char *USERNAME = "libssh2"; /* set in Dockerfile */ +const char *KEY_FILE_PRIVATE = "key_rsa"; +const char *KEY_FILE_PUBLIC = "key_rsa.pub"; /* set in Dockerfile */ + +int test(LIBSSH2_SESSION *session) +{ + int rc; + LIBSSH2_CHANNEL *channel; + + const char *userauth_list = + libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list 
== NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + NULL); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + channel = libssh2_channel_open_session(session); + /* if(channel == NULL) { */ + /* printf("Error opening channel\n"); */ + /* return 1; */ + /* } */ + + rc = libssh2_channel_request_auth_agent(channel); + if(rc != 0) { + fprintf(stderr, "Auth agent request for agent forwarding failed, " + "error code %d\n", rc); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/tests/test_hostkey.c b/vendor/libssh2/tests/test_hostkey.c index 63c2063f2b..e33f68f960 100644 --- a/vendor/libssh2/tests/test_hostkey.c +++ b/vendor/libssh2/tests/test_hostkey.c @@ -4,7 +4,7 @@ #include -const char *EXPECTED_HOSTKEY = +static const char *EXPECTED_RSA_HOSTKEY = "AAAAB3NzaC1yc2EAAAABIwAAAQEArrr/JuJmaZligyfS8vcNur+mWR2ddDQtVdhHzdKU" "UoR6/Om6cvxpe61H1YZO1xCpLUBXmkki4HoNtYOpPB2W4V+8U4BDeVBD5crypEOE1+7B" "Am99fnEDxYIOZq2/jTP0yQmzCpWYS3COyFmkOL7sfX1wQMeW5zQT2WKcxC6FSWbhDqrB" @@ -12,6 +12,10 @@ const char *EXPECTED_HOSTKEY = "i6ELfP3r+q6wdu0P4jWaoo3De1aYxnToV/ldXykpipON4NPamsb6Ph2qlJQKypq7J4iQ" "gkIIbCU1A31+4ExvcIVoxLQw/aTSbw=="; +static const char *EXPECTED_ECDSA_HOSTKEY = + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC+/syyeKJD9dC2ZH" + "9Q7iJGReR4YM3rUCMsSynkyXojdfSClGCMY7JvWlt30ESjYvxoTfSRGx6WvaqYK/vPoYQ4="; + int test(LIBSSH2_SESSION *session) { int rc; @@ -21,31 +25,38 @@ int test(LIBSSH2_SESSION *session) char *expected_hostkey = NULL; const char *hostkey = libssh2_session_hostkey(session, &len, &type); - if (hostkey == NULL) { + if(hostkey == NULL) { print_last_session_error("libssh2_session_hostkey"); 
return 1; } - if (type != LIBSSH2_HOSTKEY_TYPE_RSA) { - /* Hostkey configured in docker container is RSA */ - fprintf(stderr, "Wrong type of hostkey\n"); + if(type == LIBSSH2_HOSTKEY_TYPE_ECDSA_256) { + rc = libssh2_base64_decode(session, &expected_hostkey, &expected_len, + EXPECTED_ECDSA_HOSTKEY, + strlen(EXPECTED_ECDSA_HOSTKEY)); + } + else if(type == LIBSSH2_HOSTKEY_TYPE_RSA) { + rc = libssh2_base64_decode(session, &expected_hostkey, &expected_len, + EXPECTED_RSA_HOSTKEY, + strlen(EXPECTED_RSA_HOSTKEY)); + } + else { + fprintf(stderr, "Unexpected type of hostkey: %i\n", type); return 1; } - rc = libssh2_base64_decode(session, &expected_hostkey, &expected_len, - EXPECTED_HOSTKEY, strlen(EXPECTED_HOSTKEY)); - if (rc != 0) { + if(rc != 0) { print_last_session_error("libssh2_base64_decode"); return 1; } - if (len != expected_len) { - fprintf(stderr, "Hostkey does not have the expected length %ld != %d\n", - len, expected_len); + if(len != expected_len) { + fprintf(stderr, "Hostkey does not have the expected length %ld!=%d\n", + (unsigned long)len, expected_len); return 1; } - if (memcmp(hostkey, expected_hostkey, len) != 0) { + if(memcmp(hostkey, expected_hostkey, len) != 0) { fprintf(stderr, "Hostkeys do not match\n"); return 1; } diff --git a/vendor/libssh2/tests/test_hostkey_hash.c b/vendor/libssh2/tests/test_hostkey_hash.c index 6fb78d9e2e..112b491f2b 100644 --- a/vendor/libssh2/tests/test_hostkey_hash.c +++ b/vendor/libssh2/tests/test_hostkey_hash.c @@ -5,7 +5,7 @@ #include -const char *EXPECTED_HOSTKEY = +static const char *EXPECTED_RSA_HOSTKEY = "AAAAB3NzaC1yc2EAAAABIwAAAQEArrr/JuJmaZligyfS8vcNur+mWR2ddDQtVdhHzdKU" "UoR6/Om6cvxpe61H1YZO1xCpLUBXmkki4HoNtYOpPB2W4V+8U4BDeVBD5crypEOE1+7B" "Am99fnEDxYIOZq2/jTP0yQmzCpWYS3COyFmkOL7sfX1wQMeW5zQT2WKcxC6FSWbhDqrB" @@ -13,13 +13,31 @@ const char *EXPECTED_HOSTKEY = "i6ELfP3r+q6wdu0P4jWaoo3De1aYxnToV/ldXykpipON4NPamsb6Ph2qlJQKypq7J4iQ" "gkIIbCU1A31+4ExvcIVoxLQw/aTSbw=="; -const char *EXPECTED_MD5_HASH_DIGEST = 
"0C0ED1A5BB10275F76924CE187CE5C5E"; +static const char *EXPECTED_ECDSA_HOSTKEY = + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBC+/syyeKJD9dC2ZH" + "9Q7iJGReR4YM3rUCMsSynkyXojdfSClGCMY7JvWlt30ESjYvxoTfSRGx6WvaqYK/vPoYQ4="; -const char *EXPECTED_SHA1_HASH_DIGEST = +static const char *EXPECTED_RSA_MD5_HASH_DIGEST = + "0C0ED1A5BB10275F76924CE187CE5C5E"; + +static const char *EXPECTED_RSA_SHA1_HASH_DIGEST = "F3CD59E2913F4422B80F7B0A82B2B89EAE449387"; -const int MD5_HASH_SIZE = 16; -const int SHA1_HASH_SIZE = 20; +static const char *EXPECTED_RSA_SHA256_HASH_DIGEST = + "92E3DA49DF3C7F99A828F505ED8239397A5D1F62914459760F878F7510F563A3"; + +static const char *EXPECTED_ECDSA_MD5_HASH_DIGEST = + "0402E4D897580BBC911379CBD88BCD3D"; + +static const char *EXPECTED_ECDSA_SHA1_HASH_DIGEST = + "12FDAD1E3B31B10BABB00F2A8D1B9A62C326BD2F"; + +static const char *EXPECTED_ECDSA_SHA256_HASH_DIGEST = + "56FCD975B166C3F0342D0036E44C311A86C0EAE40713B53FC776369BAE7F5264"; + +static const int MD5_HASH_SIZE = 16; +static const int SHA1_HASH_SIZE = 20; +static const int SHA256_HASH_SIZE = 32; static void calculate_digest(const char *hash, size_t hash_len, char *buffer, size_t buffer_len) @@ -28,7 +46,7 @@ static void calculate_digest(const char *hash, size_t hash_len, char *buffer, char *p = buffer; char *end = buffer + buffer_len; - for (i = 0; i < hash_len && p < end; ++i) { + for(i = 0; i < hash_len && p < end; ++i) { p += snprintf(p, end - p, "%02X", (unsigned char)hash[i]); } } @@ -37,36 +55,128 @@ int test(LIBSSH2_SESSION *session) { char buf[BUFSIZ]; + const char *hostkey; const char *md5_hash; const char *sha1_hash; + const char *sha256_hash; + int type; + size_t len; + + /* these are the host keys under test, they are currently unused */ + (void)EXPECTED_RSA_HOSTKEY; + (void)EXPECTED_ECDSA_HOSTKEY; - md5_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_MD5); - if (md5_hash == NULL) { - print_last_session_error( - "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_MD5)"); + 
hostkey = libssh2_session_hostkey(session, &len, &type); + if(hostkey == NULL) { + print_last_session_error("libssh2_session_hostkey"); return 1; } - calculate_digest(md5_hash, MD5_HASH_SIZE, buf, BUFSIZ); + if(type == LIBSSH2_HOSTKEY_TYPE_ECDSA_256) { + + md5_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_MD5); + if(md5_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_MD5)"); + return 1; + } + + calculate_digest(md5_hash, MD5_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_ECDSA_MD5_HASH_DIGEST) != 0) { + fprintf(stderr, + "ECDSA MD5 hash not as expected - digest %s != %s\n", + buf, EXPECTED_ECDSA_MD5_HASH_DIGEST); + return 1; + } + + sha1_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA1); + if(sha1_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_SHA1)"); + return 1; + } + + calculate_digest(sha1_hash, SHA1_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_ECDSA_SHA1_HASH_DIGEST) != 0) { + fprintf(stderr, + "ECDSA SHA1 hash not as expected - digest %s != %s\n", + buf, EXPECTED_ECDSA_SHA1_HASH_DIGEST); + return 1; + } + + sha256_hash = libssh2_hostkey_hash(session, + LIBSSH2_HOSTKEY_HASH_SHA256); + if(sha256_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_SHA256)"); + return 1; + } + + calculate_digest(sha256_hash, SHA256_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_ECDSA_SHA256_HASH_DIGEST) != 0) { + fprintf(stderr, + "ECDSA SHA256 hash not as expected - digest %s != %s\n", + buf, EXPECTED_ECDSA_SHA256_HASH_DIGEST); + return 1; + } - if (strcmp(buf, EXPECTED_MD5_HASH_DIGEST) != 0) { - fprintf(stderr, "MD5 hash not as expected - digest %s != %s\n", buf, - EXPECTED_MD5_HASH_DIGEST); - return 1; } - - sha1_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA1); - if (sha1_hash == NULL) { - print_last_session_error( - "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_SHA1)"); - return 1; + else if(type == 
LIBSSH2_HOSTKEY_TYPE_RSA) { + + md5_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_MD5); + if(md5_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_MD5)"); + return 1; + } + + calculate_digest(md5_hash, MD5_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_RSA_MD5_HASH_DIGEST) != 0) { + fprintf(stderr, + "MD5 hash not as expected - digest %s != %s\n", + buf, EXPECTED_RSA_MD5_HASH_DIGEST); + return 1; + } + + sha1_hash = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA1); + if(sha1_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_SHA1)"); + return 1; + } + + calculate_digest(sha1_hash, SHA1_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_RSA_SHA1_HASH_DIGEST) != 0) { + fprintf(stderr, + "SHA1 hash not as expected - digest %s != %s\n", + buf, EXPECTED_RSA_SHA1_HASH_DIGEST); + return 1; + } + + sha256_hash = libssh2_hostkey_hash(session, + LIBSSH2_HOSTKEY_HASH_SHA256); + if(sha256_hash == NULL) { + print_last_session_error( + "libssh2_hostkey_hash(LIBSSH2_HOSTKEY_HASH_SHA256)"); + return 1; + } + + calculate_digest(sha256_hash, SHA256_HASH_SIZE, buf, BUFSIZ); + + if(strcmp(buf, EXPECTED_RSA_SHA256_HASH_DIGEST) != 0) { + fprintf(stderr, + "SHA256 hash not as expected - digest %s != %s\n", + buf, EXPECTED_RSA_SHA256_HASH_DIGEST); + return 1; + } } - - calculate_digest(sha1_hash, SHA1_HASH_SIZE, buf, BUFSIZ); - - if (strcmp(buf, EXPECTED_SHA1_HASH_DIGEST) != 0) { - fprintf(stderr, "SHA1 hash not as expected - digest %s != %s\n", buf, - EXPECTED_SHA1_HASH_DIGEST); + else { + fprintf(stderr, "Unexpected type of hostkey: %i\n", type); return 1; } diff --git a/vendor/libssh2/tests/test_keyboard_interactive_auth_fails_with_wrong_response.c b/vendor/libssh2/tests/test_keyboard_interactive_auth_fails_with_wrong_response.c index beb6608ee9..56b1ba5490 100644 --- a/vendor/libssh2/tests/test_keyboard_interactive_auth_fails_with_wrong_response.c +++ 
b/vendor/libssh2/tests/test_keyboard_interactive_auth_fails_with_wrong_response.c @@ -4,11 +4,11 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *WRONG_PASSWORD = "i'm not the password"; +static const char *USERNAME = "libssh2"; /* set in Dockerfile */ +static const char *WRONG_PASSWORD = "i'm not the password"; static void kbd_callback(const char *name, int name_len, - const char *instruction, int instruction_len, + const char *instruct, int instruct_len, int num_prompts, const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, @@ -17,13 +17,13 @@ static void kbd_callback(const char *name, int name_len, int i; (void)abstract; fprintf(stdout, "Kb-int name: %.*s\n", name_len, name); - fprintf(stdout, "Kb-int instruction: %.*s\n", instruction_len, instruction); - for (i = 0; i < num_prompts; ++i) { + fprintf(stdout, "Kb-int instruction: %.*s\n", instruct_len, instruct); + for(i = 0; i < num_prompts; ++i) { fprintf(stdout, "Kb-int prompt %d: %.*s\n", i, prompts[i].length, prompts[i].text); } - if (num_prompts == 1) { + if(num_prompts == 1) { responses[0].text = strdup(WRONG_PASSWORD); responses[0].length = strlen(WRONG_PASSWORD); } @@ -35,12 +35,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "keyboard-interactive") == NULL) { + if(strstr(userauth_list, "keyboard-interactive") == NULL) { fprintf(stderr, "'keyboard-interactive' was expected in userauth list: %s\n", userauth_list); @@ -49,7 +49,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_keyboard_interactive_ex( session, USERNAME, strlen(USERNAME), kbd_callback); - if (rc == 0) { + if(rc == 0) { fprintf(stderr, "Keyboard-interactive auth succeeded with wrong response\n"); return 1; diff --git 
a/vendor/libssh2/tests/test_keyboard_interactive_auth_succeeds_with_correct_response.c b/vendor/libssh2/tests/test_keyboard_interactive_auth_succeeds_with_correct_response.c index aec1dd496d..0ccf5dd903 100644 --- a/vendor/libssh2/tests/test_keyboard_interactive_auth_succeeds_with_correct_response.c +++ b/vendor/libssh2/tests/test_keyboard_interactive_auth_succeeds_with_correct_response.c @@ -4,11 +4,12 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *PASSWORD = "my test password"; /* configured in Dockerfile */ +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *PASSWORD = "my test password"; static void kbd_callback(const char *name, int name_len, - const char *instruction, int instruction_len, + const char *instruct, int instruct_len, int num_prompts, const LIBSSH2_USERAUTH_KBDINT_PROMPT *prompts, LIBSSH2_USERAUTH_KBDINT_RESPONSE *responses, @@ -18,13 +19,13 @@ static void kbd_callback(const char *name, int name_len, (void)abstract; fprintf(stdout, "Kb-int name: %.*s\n", name_len, name); - fprintf(stdout, "Kb-int instruction: %.*s\n", instruction_len, instruction); - for (i = 0; i < num_prompts; ++i) { + fprintf(stdout, "Kb-int instruction: %.*s\n", instruct_len, instruct); + for(i = 0; i < num_prompts; ++i) { fprintf(stdout, "Kb-int prompt %d: %.*s\n", i, prompts[i].length, prompts[i].text); } - if (num_prompts == 1) { + if(num_prompts == 1) { responses[0].text = strdup(PASSWORD); responses[0].length = strlen(PASSWORD); } @@ -36,12 +37,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "keyboard-interactive") == NULL) { + if(strstr(userauth_list, "keyboard-interactive") == NULL) { fprintf(stderr, "'keyboard-interactive' was expected in userauth list: 
%s\n", userauth_list); @@ -50,7 +51,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_keyboard_interactive_ex( session, USERNAME, strlen(USERNAME), kbd_callback); - if (rc != 0) { + if(rc != 0) { print_last_session_error("libssh2_userauth_keyboard_interactive_ex"); return 1; } diff --git a/vendor/libssh2/tests/test_password_auth_fails_with_wrong_password.c b/vendor/libssh2/tests/test_password_auth_fails_with_wrong_password.c index dc65c1320c..2b895d08e8 100644 --- a/vendor/libssh2/tests/test_password_auth_fails_with_wrong_password.c +++ b/vendor/libssh2/tests/test_password_auth_fails_with_wrong_password.c @@ -4,8 +4,8 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *WRONG_PASSWORD = "i'm not the password"; +static const char *USERNAME = "libssh2"; /* set in Dockerfile */ +static const char *WRONG_PASSWORD = "i'm not the password"; int test(LIBSSH2_SESSION *session) { @@ -13,12 +13,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "password") == NULL) { + if(strstr(userauth_list, "password") == NULL) { fprintf(stderr, "'password' was expected in userauth list: %s\n", userauth_list); return 1; @@ -27,7 +27,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_password_ex(session, USERNAME, strlen(USERNAME), WRONG_PASSWORD, strlen(WRONG_PASSWORD), NULL); - if (rc == 0) { + if(rc == 0) { fprintf(stderr, "Password auth succeeded with wrong password\n"); return 1; } diff --git a/vendor/libssh2/tests/test_password_auth_fails_with_wrong_username.c b/vendor/libssh2/tests/test_password_auth_fails_with_wrong_username.c index 6ea27d42ea..b78617a49c 100644 --- a/vendor/libssh2/tests/test_password_auth_fails_with_wrong_username.c +++ 
b/vendor/libssh2/tests/test_password_auth_fails_with_wrong_username.c @@ -4,8 +4,9 @@ #include -const char *PASSWORD = "my test password"; /* configured in Dockerfile */ -const char *WRONG_USERNAME = "i dont exist"; +/* configured in Dockerfile */ +static const char *PASSWORD = "my test password"; +static const char *WRONG_USERNAME = "i dont exist"; int test(LIBSSH2_SESSION *session) { @@ -13,12 +14,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, WRONG_USERNAME, strlen(WRONG_USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "password") == NULL) { + if(strstr(userauth_list, "password") == NULL) { fprintf(stderr, "'password' was expected in userauth list: %s\n", userauth_list); return 1; @@ -27,7 +28,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_password_ex(session, WRONG_USERNAME, strlen(WRONG_USERNAME), PASSWORD, strlen(PASSWORD), NULL); - if (rc == 0) { + if(rc == 0) { fprintf(stderr, "Password auth succeeded with wrong username\n"); return 1; } diff --git a/vendor/libssh2/tests/test_password_auth_succeeds_with_correct_credentials.c b/vendor/libssh2/tests/test_password_auth_succeeds_with_correct_credentials.c index aaf9c2a385..94b86b8797 100644 --- a/vendor/libssh2/tests/test_password_auth_succeeds_with_correct_credentials.c +++ b/vendor/libssh2/tests/test_password_auth_succeeds_with_correct_credentials.c @@ -4,8 +4,9 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *PASSWORD = "my test password"; /* configured in Dockerfile */ +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *PASSWORD = "my test password"; int test(LIBSSH2_SESSION *session) { @@ -13,12 +14,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if 
(userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "password") == NULL) { + if(strstr(userauth_list, "password") == NULL) { fprintf(stderr, "'password' was expected in userauth list: %s\n", userauth_list); return 1; @@ -26,12 +27,12 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_password_ex(session, USERNAME, strlen(USERNAME), PASSWORD, strlen(PASSWORD), NULL); - if (rc != 0) { + if(rc != 0) { print_last_session_error("libssh2_userauth_password_ex"); return 1; } - if (libssh2_userauth_authenticated(session) == 0) { + if(libssh2_userauth_authenticated(session) == 0) { fprintf(stderr, "Password auth appeared to succeed but " "libssh2_userauth_authenticated returned 0\n"); return 1; diff --git a/vendor/libssh2/tests/test_public_key_auth_fails_with_wrong_key.c b/vendor/libssh2/tests/test_public_key_auth_fails_with_wrong_key.c index 6e12abf4e2..dd2d254f5a 100644 --- a/vendor/libssh2/tests/test_public_key_auth_fails_with_wrong_key.c +++ b/vendor/libssh2/tests/test_public_key_auth_fails_with_wrong_key.c @@ -4,9 +4,9 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *KEY_FILE_PRIVATE = "key_dsa_wrong"; -const char *KEY_FILE_PUBLIC = "key_dsa_wrong.pub"; +static const char *USERNAME = "libssh2"; /* set in Dockerfile */ +static const char *KEY_FILE_PRIVATE = "key_dsa_wrong"; +static const char *KEY_FILE_PUBLIC = "key_dsa_wrong.pub"; int test(LIBSSH2_SESSION *session) { @@ -14,12 +14,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "publickey") == NULL) { + if(strstr(userauth_list, "publickey") == NULL) { fprintf(stderr, "'publickey' was expected in userauth list: %s\n", userauth_list); return 1; @@ 
-28,7 +28,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_publickey_fromfile_ex( session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, NULL); - if (rc == 0) { + if(rc == 0) { fprintf(stderr, "Public-key auth succeeded with wrong key\n"); return 1; } diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_dsa_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_dsa_key.c index 4e5b46d042..187c1313fb 100644 --- a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_dsa_key.c +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_dsa_key.c @@ -4,9 +4,10 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *KEY_FILE_PRIVATE = "key_dsa"; -const char *KEY_FILE_PUBLIC = "key_dsa.pub"; /* configured in Dockerfile */ +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_dsa"; +static const char *KEY_FILE_PUBLIC = "key_dsa.pub"; int test(LIBSSH2_SESSION *session) { @@ -14,12 +15,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "publickey") == NULL) { + if(strstr(userauth_list, "publickey") == NULL) { fprintf(stderr, "'publickey' was expected in userauth list: %s\n", userauth_list); return 1; @@ -28,7 +29,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_publickey_fromfile_ex( session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, NULL); - if (rc != 0) { + if(rc != 0) { print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); return 1; } diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ecdsa_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ecdsa_key.c new file 
mode 100644 index 0000000000..2ea3a3699a --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ecdsa_key.c @@ -0,0 +1,38 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_ecdsa"; +static const char *KEY_FILE_PUBLIC = "key_ecdsa.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + const char *userauth_list = NULL; + + userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + NULL); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key.c new file mode 100644 index 0000000000..c52830d94f --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key.c @@ -0,0 +1,38 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_ed25519"; +static const char *KEY_FILE_PUBLIC = "key_ed25519.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + const char *userauth_list = NULL; + + userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was 
expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + NULL); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c new file mode 100644 index 0000000000..a79d1b5183 --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_ed25519_key_from_mem.c @@ -0,0 +1,98 @@ +#include "session_fixture.h" + +#include + +#include +#include + +static const char *USERNAME = "libssh2"; /* set in Dockerfile */ +static const char *KEY_FILE_ED25519_PRIVATE = "key_ed25519"; + +int read_file(const char *path, char **buf, size_t *len); + +int test(LIBSSH2_SESSION *session) +{ + int rc; + char *buffer = NULL; + size_t len = 0; + const char *userauth_list = NULL; + + userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + if(read_file(KEY_FILE_ED25519_PRIVATE, &buffer, &len)) { + fprintf(stderr, "Reading key file failed."); + return 1; + } + + rc = libssh2_userauth_publickey_frommemory(session, + USERNAME, strlen(USERNAME), + NULL, 0, + buffer, len, + NULL); + + free(buffer); + + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} + +int read_file(const char *path, char **out_buffer, size_t *out_len) +{ + FILE *fp = NULL; + char *buffer = NULL; + size_t len = 0; + + if(out_buffer == NULL || out_len == NULL || path == NULL) { + fprintf(stderr, 
"invalid params."); + return 1; + } + + *out_buffer = NULL; + *out_len = 0; + + fp = fopen(path, "r"); + + if(!fp) { + fprintf(stderr, "File could not be read."); + return 1; + } + + fseek(fp, 0L, SEEK_END); + len = ftell(fp); + rewind(fp); + + buffer = calloc(1, len + 1); + if(!buffer) { + fclose(fp); + fprintf(stderr, "Could not alloc memory."); + return 1; + } + + if(1 != fread(buffer, len, 1, fp)) { + fclose(fp); + free(buffer); + fprintf(stderr, "Could not read file into memory."); + return 1; + } + + fclose(fp); + + *out_buffer = buffer; + *out_len = len; + + return 0; +} diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c new file mode 100644 index 0000000000..553023a99d --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_ed25519_key.c @@ -0,0 +1,39 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *PASSWORD = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_ed25519_encrypted"; +static const char *KEY_FILE_PUBLIC = "key_ed25519_encrypted.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + const char *userauth_list = NULL; + + userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + PASSWORD); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git 
a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_rsa_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_rsa_key.c new file mode 100644 index 0000000000..ba98ac7c4a --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_encrypted_rsa_key.c @@ -0,0 +1,39 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *PASSWORD = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_rsa_encrypted"; +static const char *KEY_FILE_PUBLIC = "key_rsa_encrypted.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + + const char *userauth_list = + libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + PASSWORD); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_key.c index b02a6425d9..0cf2a6331d 100644 --- a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_key.c +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_key.c @@ -4,9 +4,10 @@ #include -const char *USERNAME = "libssh2"; /* configured in Dockerfile */ -const char *KEY_FILE_PRIVATE = "key_rsa"; -const char *KEY_FILE_PUBLIC = "key_rsa.pub"; /* configured in Dockerfile */ +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_rsa"; +static 
const char *KEY_FILE_PUBLIC = "key_rsa.pub"; int test(LIBSSH2_SESSION *session) { @@ -14,12 +15,12 @@ int test(LIBSSH2_SESSION *session) const char *userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); - if (userauth_list == NULL) { + if(userauth_list == NULL) { print_last_session_error("libssh2_userauth_list"); return 1; } - if (strstr(userauth_list, "publickey") == NULL) { + if(strstr(userauth_list, "publickey") == NULL) { fprintf(stderr, "'publickey' was expected in userauth list: %s\n", userauth_list); return 1; @@ -28,7 +29,7 @@ int test(LIBSSH2_SESSION *session) rc = libssh2_userauth_publickey_fromfile_ex( session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, NULL); - if (rc != 0) { + if(rc != 0) { print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); return 1; } diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c new file mode 100644 index 0000000000..a067d729b8 --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_rsa_openssh_key.c @@ -0,0 +1,38 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "key_rsa_openssh"; +static const char *KEY_FILE_PUBLIC = "key_rsa_openssh.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + + const char *userauth_list = + libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + NULL); + if(rc != 0) { + 
print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_signed_ecdsa_key.c b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_signed_ecdsa_key.c new file mode 100644 index 0000000000..10b33cbb8d --- /dev/null +++ b/vendor/libssh2/tests/test_public_key_auth_succeeds_with_correct_signed_ecdsa_key.c @@ -0,0 +1,38 @@ +#include "session_fixture.h" + +#include + +#include + +/* configured in Dockerfile */ +static const char *USERNAME = "libssh2"; +static const char *KEY_FILE_PRIVATE = "signed_key_ecdsa"; +static const char *KEY_FILE_PUBLIC = "signed_key_ecdsa-cert.pub"; + +int test(LIBSSH2_SESSION *session) +{ + int rc; + const char *userauth_list = NULL; + + userauth_list = libssh2_userauth_list(session, USERNAME, strlen(USERNAME)); + if(userauth_list == NULL) { + print_last_session_error("libssh2_userauth_list"); + return 1; + } + + if(strstr(userauth_list, "publickey") == NULL) { + fprintf(stderr, "'publickey' was expected in userauth list: %s\n", + userauth_list); + return 1; + } + + rc = libssh2_userauth_publickey_fromfile_ex( + session, USERNAME, strlen(USERNAME), KEY_FILE_PUBLIC, KEY_FILE_PRIVATE, + NULL); + if(rc != 0) { + print_last_session_error("libssh2_userauth_publickey_fromfile_ex"); + return 1; + } + + return 0; +} diff --git a/vendor/libssh2/vms/libssh2_make_help.dcl b/vendor/libssh2/vms/libssh2_make_help.dcl index b36512ecb3..652671da3d 100644 --- a/vendor/libssh2/vms/libssh2_make_help.dcl +++ b/vendor/libssh2/vms/libssh2_make_help.dcl @@ -29,7 +29,7 @@ $ man2help -a [-.docs]AUTHORS.; libssh2.hlp -b 2 $ man2help -a [-.docs]BINDINGS.; libssh2.hlp -b 2 $ man2help -a [-.docs]HACKING.; libssh2.hlp -b 2 $ if f$search("[]HACKING_CRYPTO.") .nes. "" then delete []HACKING_CRYPTO.;* -$ copy [-.docs]HACKING.CRYPTO; []HACKING_CRYPTO. +$ copy [-.docs]HACKING-CRYPTO; []HACKING_CRYPTO. 
$ man2help -a []HACKING_CRYPTO.; libssh2.hlp -b 2 $ man2help -a [-.docs]TODO.; libssh2.hlp -b 2 $! @@ -56,18 +56,18 @@ $! $ thisdir = f$environment( "procedure" ) $ thisdir = f$parse(thisdir,,,"device") + f$parse(thisdir,,,"directory") $ set default 'thisdir' -$! +$! $ say = "write sys$output" $! -$ pipe search [-.include]*.h libssh2_version_major/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - +$ pipe search [-.include]*.h libssh2_version_major/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - define/job majorv &l ) -$ pipe search [-.include]*.h libssh2_version_minor/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - +$ pipe search [-.include]*.h libssh2_version_minor/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - define/job minorv &l ) -$ pipe search [-.include]*.h libssh2_version_patch/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - +$ pipe search [-.include]*.h libssh2_version_patch/nohead | (read sys$input l ; l = f$element(2," ",f$edit(l,"trim,compress")) ; - define/job patchv &l ) $! $ majorv = f$trnlnm("majorv") -$ minorv = f$integer(f$trnlnm("minorv")) +$ minorv = f$integer(f$trnlnm("minorv")) $ patchv = f$integer( f$trnlnm("patchv")) $! $ helpversion = "This help library is based on build version ''majorv'.''minorv'.''patchv' of libssh2." @@ -81,15 +81,15 @@ $ then $ cc man2help $ link man2help $ endif -$! -$ man2help := $'thisdir'man2help.exe $! -$ if f$search("libssh2.hlp") .nes. "" -$ then +$ man2help := $'thisdir'man2help.exe +$! +$ if f$search("libssh2.hlp") .nes. "" +$ then $ delete libssh2.hlp;* $ endif -$ if f$search("libssh2.hlb") .nes. "" -$ then +$ if f$search("libssh2.hlb") .nes. 
"" +$ then $ delete libssh2.hlb;* $ endif $return diff --git a/vendor/libssh2/win32/GNUmakefile b/vendor/libssh2/win32/GNUmakefile index 86857513ca..0971d891cc 100644 --- a/vendor/libssh2/win32/GNUmakefile +++ b/vendor/libssh2/win32/GNUmakefile @@ -86,11 +86,26 @@ CAT = type ECHONL = $(ComSpec) /c echo. endif +ifeq ($(LIBSSH2_CC),) +LIBSSH2_CC := $(CROSSPREFIX)gcc +endif +ifeq ($(LIBSSH2_AR),) +LIBSSH2_AR := $(CROSSPREFIX)ar +endif +ifeq ($(LIBSSH2_RANLIB),) +LIBSSH2_RANLIB := $(CROSSPREFIX)ranlib +endif +ifeq ($(LIBSSH2_DLL_A_SUFFIX),) +LIBSSH2_DLL_A_SUFFIX := dll +endif + +libssh2_dll_LIBRARY = $(TARGET)$(LIBSSH2_DLL_SUFFIX).dll + # The following line defines your compiler. ifdef METROWERKS CC = mwcc else - CC = $(CROSSPREFIX)gcc + CC = $(LIBSSH2_CC) endif # Set environment var ARCH to your architecture to override autodetection. @@ -110,7 +125,7 @@ endif -include $(OBJDIR)/version.inc # Global flags for all compilers -CFLAGS = $(LIBSSH2_CFLAG_EXTRAS) $(OPT) -D$(DB) -DLIBSSH2_WIN32 # -DHAVE_CONFIG_H +CFLAGS = $(LIBSSH2_CFLAG_EXTRAS) $(OPT) -D$(DB) -DLIBSSH2_WIN32 -DHAVE_WINDOWS_H # -DHAVE_CONFIG_H LDFLAGS = $(LIBSSH2_LDFLAG_EXTRAS) ifeq ($(CC),mwcc) @@ -128,13 +143,13 @@ CFLAGS += -nostdinc -gccinc -msgstyle gcc -inline off -opt nointrinsics -proc 58 CFLAGS += -ir "$(METROWERKS)/MSL" -ir "$(METROWERKS)/Win32-x86 Support" CFLAGS += -w on,nounused,nounusedexpr # -ansi strict else -LD = $(CROSSPREFIX)gcc -RC = $(CROSSPREFIX)windres -LDFLAGS += -s -shared -Wl,--output-def,$(TARGET).def,--out-implib,$(TARGET)dll.a -AR = $(CROSSPREFIX)ar -ARFLAGS = -cq LIBEXT = a -RANLIB = $(CROSSPREFIX)ranlib +LD = $(LIBSSH2_CC) +RC = $(CROSSPREFIX)windres +LDFLAGS += -s -shared -Wl,--output-def,$(libssh2_dll_LIBRARY:.dll=.def),--out-implib,$(TARGET)$(LIBSSH2_DLL_A_SUFFIX).$(LIBEXT) +AR = $(LIBSSH2_AR) +ARFLAGS = cru +RANLIB = $(LIBSSH2_RANLIB) RCFLAGS = -I $(PROOT)/include -O coff CFLAGS += -fno-builtin CFLAGS += -fno-strict-aliasing @@ -223,7 +238,7 @@ OBJL = $(OBJS) 
$(OBJDIR)/$(TARGET).res all: lib dll -dll: prebuild $(TARGET).dll +dll: prebuild $(libssh2_dll_LIBRARY) lib: prebuild $(TARGET).$(LIBEXT) @@ -248,7 +263,7 @@ dist: all $(DISTDIR) $(DISTDIR)/readme.txt @$(call COPY, $(PROOT)/INSTALL, $(DISTDIR)) @$(call COPY, $(PROOT)/README, $(DISTDIR)) @$(call COPY, $(PROOT)/RELEASE-NOTES, $(DISTDIR)) - @$(call COPY, $(TARGET).dll, $(DISTDIR)/bin) + @$(call COPY, $(libssh2_dll_LIBRARY), $(DISTDIR)/bin) @echo Creating $(DISTARC) @$(ZIP) $(DISTARC) $(DISTDIR)/* < $(DISTDIR)/readme.txt @@ -261,7 +276,7 @@ dev: all $(DEVLDIR) $(DEVLDIR)/readme.txt @$(call COPY, $(PROOT)/INSTALL, $(DEVLDIR)) @$(call COPY, $(PROOT)/README, $(DEVLDIR)) @$(call COPY, $(PROOT)/RELEASE-NOTES, $(DEVLDIR)) - @$(call COPY, $(TARGET).dll, $(DEVLDIR)/bin) + @$(call COPY, $(libssh2_dll_LIBRARY), $(DEVLDIR)/bin) @$(call COPY, $(PROOT)/include/*.h, $(DEVLDIR)/include) @$(call COPY, libssh2_config.h, $(DEVLDIR)/include) @$(call COPY, *.$(LIBEXT), $(DEVLDIR)/win32) @@ -284,7 +299,7 @@ testclean: clean clean: # $(call DEL, libssh2_config.h) - $(call DEL, $(TARGET).dll $(TARGET).def $(TARGET).$(LIBEXT) $(TARGET)dll.$(LIBEXT)) + $(call DEL, $(libssh2_dll_LIBRARY) $(libssh2_dll_LIBRARY:.dll=.def) $(TARGET).$(LIBEXT) $(TARGET)$(LIBSSH2_DLL_A_SUFFIX).$(LIBEXT)) $(call RMDIR, $(OBJDIR)) $(OBJDIR): @@ -304,7 +319,7 @@ ifdef RANLIB @$(RANLIB) $@ endif -$(TARGET).dll $(TARGET)dll.a: $(OBJL) +$(libssh2_dll_LIBRARY) $(TARGET)$(LIBSSH2_DLL_A_SUFFIX).$(LIBEXT): $(OBJL) @echo Linking $@ @$(call DEL, $@) @$(LD) $(LDFLAGS) $^ -o $@ $(LIBPATH) $(LDLIBS) diff --git a/vendor/libssh2/win32/libssh2.dsp b/vendor/libssh2/win32/libssh2.dsp index eac2b82b1b..1657c7bf88 100644 --- a/vendor/libssh2/win32/libssh2.dsp +++ b/vendor/libssh2/win32/libssh2.dsp @@ -263,6 +263,18 @@ SOURCE=..\src\agent.c # End Source File # Begin Source File +SOURCE=..\src\agent_win.c +# End Source File +# Begin Source File + +SOURCE=..\src\bcrypt_pbkdf.c +# End Source File +# Begin Source File + 
+SOURCE=..\src\blowfish.c +# End Source File +# Begin Source File + SOURCE=..\src\channel.c # End Source File # Begin Source File @@ -355,6 +367,14 @@ SOURCE=..\src\wincng.c # PROP Default_Filter "h;hpp;hxx" # Begin Source File +SOURCE=..\src\agent.h +# End Source File +# Begin Source File + +SOURCE=..\src\blf.h +# End Source File +# Begin Source File + SOURCE=..\src\channel.h # End Source File # Begin Source File diff --git a/vendor/libssh2/win32/libssh2_config.h b/vendor/libssh2/win32/libssh2_config.h index b6af97806b..6ac2ef43eb 100644 --- a/vendor/libssh2/win32/libssh2_config.h +++ b/vendor/libssh2/win32/libssh2_config.h @@ -18,7 +18,7 @@ #define HAVE_GETTIMEOFDAY #endif /* __MINGW32__ */ -#define LIBSSH2_OPENSSL +#define HAVE_LIBCRYPT32 #define HAVE_WINSOCK2_H #define HAVE_IOCTLSOCKET #define HAVE_SELECT @@ -44,3 +44,4 @@ #define LIBSSH2_DH_GEX_NEW 1 #endif /* LIBSSH2_CONFIG_H */ + diff --git a/vendor/pageant.exe b/vendor/pageant.exe deleted file mode 100644 index f11c378002..0000000000 Binary files a/vendor/pageant.exe and /dev/null differ diff --git a/vendor/pageant/pageant_arm64.exe b/vendor/pageant/pageant_arm64.exe new file mode 100644 index 0000000000..1f5516f6c5 Binary files /dev/null and b/vendor/pageant/pageant_arm64.exe differ diff --git a/vendor/pageant/pageant_x64.exe b/vendor/pageant/pageant_x64.exe new file mode 100644 index 0000000000..9c3b35ff09 Binary files /dev/null and b/vendor/pageant/pageant_x64.exe differ diff --git a/vendor/pageant/pageant_x86.exe b/vendor/pageant/pageant_x86.exe new file mode 100644 index 0000000000..baaa76c7f3 Binary files /dev/null and b/vendor/pageant/pageant_x86.exe differ diff --git a/vendor/patches/README.md b/vendor/patches/README.md new file mode 100644 index 0000000000..898a965fef --- /dev/null +++ b/vendor/patches/README.md @@ -0,0 +1,14 @@ +# Patches directory +Patches for modifying vendor code without including it. 
+ +Patches will be applied from 000 to 999 + +### Naming Convention + +`--.patch` + +Operating system is either "darwin", "linux", or "all". + +### Content + +All patches should start with a description of what they do and why they're needed. \ No newline at end of file diff --git a/vendor/private.ppk b/vendor/private.ppk deleted file mode 100644 index e69837aaa9..0000000000 --- a/vendor/private.ppk +++ /dev/null @@ -1,26 +0,0 @@ -PuTTY-User-Key-File-2: ssh-rsa -Encryption: none -Comment: rsa-key-20141021 -Public-Lines: 6 -AAAAB3NzaC1yc2EAAAABJQAAAQEAkr6tlPb0QODfJJRhwsSsN88TSB6pgk/6x1KM -yVuDE6SeKz5sB4iaQfPleLSaWxuQEKkoa4sLAROy18lSk0PMioQHM6tgXbOcpTbS -Yf6uf1wtAMeFTO7vP0ZuIXbYmBmsK4DK1GWQq3Db5+Q/xbDp20dlCvCPMdokhYFr -N2B9G0C4AKgXRMS+loEXwOeQiupfYcUXfZiCOoviArCcLgRsUNZTeTbP03skLBDr -2xMdpaGx8QgSylz+yGeXRNjXaxE2mnwwVkBnNYsInZhw4/OdxddyilDealpcwp7x -enAwWsoLd9NlpskNCEQsznPMAe66/PKvRGl22+8tpNBzDbnZsw== -Private-Lines: 14 -AAABABPUk/9fpHd4VYF71dwMvVOmXI00k3J5gsD9UUuklSwrAJ4PWrTo8kBDjbZd -mFGAQ+aTZlO41/k6A2lEuCG9Dc2HdphHl2abuzjru5CztrdDzrrqh6Kc1Dj7rgSF -rpEYOdxdgzF1gkCuYuf8P/ge035/RQF6dDcrUQsfU62JlF2gwQpVbQZ97DC9uGtt -ktYY0pSVU36xty4uQ148mOC8TXWFOxaGPOFq14sxmBUFVhHsmHnytQULIkibRCze -bfkpJNAizNKTNCfupd3aub205kzG48blZ3eWxYtK3mreiSDvhdWNWiyVhTajkXGQ -r3a+AqE/8e5Qks7ekzbpKk388a0AAACBAOftyBzFLENp3ccimF7ctlS44H2hfas4 -2PQVbDpCKrY1u0XC+vn/GqgAVq/hcHeK6DrkaEK23Q30phNN7+8BDzR1VxO27TLg -UdwFE0z/0jnTuwkJcusO7bCb3dGGUX4ENSyRpNJVyu4X4iKuz03+ccPD3utkTIMI -zCskK5VQT0MBAAAAgQCh+ZsG6SfakMD76b3zF3xaw8bJmPe+q/YumlDDbM6sG9q+ -3Fp5trBBJbGjXhTCyUkOuWiUh8cmVBFYRHrtz9GLVCOXVd6yYOzHlTKSNRnu7Slt -jdX1s0QphmPlQ6/uyUIAhrIrOkVFKNrV10Kex/7jwwdfL8i1cecng+dxfE4AswAA -AIEAiWClSrfWJTIKa3CZtIxlbHFHNLAVEuDjGeyKIMZ96qDEM7jDQ4vUzAagw60D -qUsJQMyx9RFfSLDYE3TZaXGrGdSXcFBz9f9xLcWLe/2LH7NUmjpUf5GnlMSs7+Br -ET2wiPd6NDf1kka+4+zMOgFqJF44xgDuNLnM3ty4EFlfzlY= -Private-MAC: 6e0ff7c94a3253c5e45b3e06951e04d2b06e6262 diff --git a/vendor/static_config/openssl_distributions.json 
b/vendor/static_config/openssl_distributions.json deleted file mode 100644 index f00384dd9b..0000000000 --- a/vendor/static_config/openssl_distributions.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "macOS-clang-8.1-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/bd3cca94af79c6a2c35b664c43f643582a13a9f2/conan_package.tgz", - "macOS-clang-8.1-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/0197c20e330042c026560da838f5b4c4bf094b8a/conan_package.tgz", - "macOS-clang-9-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/85d674b0f6705cafe6b2edb8689ffbe0f3c2e60b/conan_package.tgz", - "macOS-clang-9-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/227fb0ea22f4797212e72ba94ea89c7b3fbc2a0c/conan_package.tgz", - "win32-vs12-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/39d6fe009a278f733e97b59a4f9536bfc4e8f366/conan_package.tgz", - "win32-vs12-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/d16d8a16b4cef0046922b8d83d567689d36149d0/conan_package.tgz", - "win32-vs14-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/889fd4ea9ba89fd6dc7fa32e2f45bd9804b85481/conan_package.tgz", - "win32-vs14-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/253958a6ce15f1c9325eeea33ffc0a5cfc29212a/conan_package.tgz", - "win32-vs15-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/05f648ec4d066b206769d6314e859fdd97a18f8d/conan_package.tgz", - "win32-vs15-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/a075e3ffc3590d6a920a26b4218b20253dd68d57/conan_package.tgz", - "win64-vs12-static-debug": 
"https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/6bc3be0f39fdc624b24ba9bb00e8af55928d74e7/conan_package.tgz", - "win64-vs12-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/e942631065059eabe964ca471ad35bb453c15b31/conan_package.tgz", - "win64-vs14-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/867ca54360ed234a8bc9a6aa63806599ea29b38e/conan_package.tgz", - "win64-vs14-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/c4aef4edbc33205e0cf9b55bfb116b38c90ec132/conan_package.tgz", - "win64-vs15-static-debug": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/0bd0c413b56aaec57c0f222a89b4e565a6729027/conan_package.tgz", - "win64-vs15-static-release": "https://dl.bintray.com/conan-community/conan/conan/OpenSSL/1.1.0i/stable/package/fce9be1511a149a4af36b5997f7e611ab83b2f58/conan_package.tgz" -} \ No newline at end of file